Diffstat (limited to 'include/linux')
85 files changed, 1506 insertions, 695 deletions
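Among the additions below, bitmap.h and cpumask.h gain bitmap_or_equal() and cpumask_or_equal(), which test whether the OR of two masks equals a third. A minimal usage sketch based only on the signatures added in this diff; the check_affinity() wrapper and its arguments are hypothetical, not part of the patch:

	#include <linux/cpumask.h>

	/*
	 * Hypothetical helper: returns true when the union of two CPU
	 * masks covers exactly the CPUs in @expected, using the
	 * cpumask_or_equal() helper added below (a wrapper around
	 * __bitmap_or_equal()).
	 */
	static bool check_affinity(const struct cpumask *a,
				   const struct cpumask *b,
				   const struct cpumask *expected)
	{
		/* true iff (*a | *b) == *expected */
		return cpumask_or_equal(a, b, expected);
	}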
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 9426b9aaed86..0fecacca51e8 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -994,62 +994,11 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c #endif #endif -struct acpi_gpio_params { - unsigned int crs_entry_index; - unsigned int line_index; - bool active_low; -}; - -struct acpi_gpio_mapping { - const char *name; - const struct acpi_gpio_params *data; - unsigned int size; - -/* Ignore IoRestriction field */ -#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) -/* - * When ACPI GPIO mapping table is in use the index parameter inside it - * refers to the GPIO resource in _CRS method. That index has no - * distinction of actual type of the resource. When consumer wants to - * get GpioIo type explicitly, this quirk may be used. - */ -#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1) - - unsigned int quirks; -}; - #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) -int acpi_dev_add_driver_gpios(struct acpi_device *adev, - const struct acpi_gpio_mapping *gpios); - -static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) -{ - if (adev) - adev->driver_gpios = NULL; -} - -int devm_acpi_dev_add_driver_gpios(struct device *dev, - const struct acpi_gpio_mapping *gpios); -void devm_acpi_dev_remove_driver_gpios(struct device *dev); - bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index); #else -static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, - const struct acpi_gpio_mapping *gpios) -{ - return -ENXIO; -} -static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} - -static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, - const struct acpi_gpio_mapping *gpios) -{ - return -ENXIO; -} -static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {} - static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio) { @@ -1302,11 +1251,16 @@ static inline int lpit_read_residency_count_address(u64 *address) #endif #ifdef CONFIG_ACPI_PPTT +int acpi_pptt_cpu_is_thread(unsigned int cpu); int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); int find_acpi_cpu_cache_topology(unsigned int cpu, int level); #else +static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) +{ + return -EINVAL; +} static inline int find_acpi_cpu_topology(unsigned int cpu, int level) { return -EINVAL; diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 0760ca1cb009..74748e306f4b 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -5,7 +5,8 @@ #include <linux/time.h> #include <linux/hrtimer.h> #include <linux/timerqueue.h> -#include <linux/rtc.h> + +struct rtc_device; enum alarmtimer_type { ALARM_REALTIME, diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 4a4d00646040..21e950e4ab62 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -184,6 +184,9 @@ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32)); extern int amd_iommu_update_ga(int cpu, bool is_run, void *data); +extern int amd_iommu_activate_guest_mode(void *data); +extern int amd_iommu_deactivate_guest_mode(void *data); + #else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ static inline int @@ -198,6 +201,15 @@ 
amd_iommu_update_ga(int cpu, bool is_run, void *data) return 0; } +static inline int amd_iommu_activate_guest_mode(void *data) +{ + return 0; +} + +static inline int amd_iommu_deactivate_guest_mode(void *data) +{ + return 0; +} #endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ #endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 1cfe05ea1d89..42f2b5126094 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -33,4 +33,30 @@ unsigned long topology_get_freq_scale(int cpu) return per_cpu(freq_scale, cpu); } +struct cpu_topology { + int thread_id; + int core_id; + int package_id; + int llc_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; + cpumask_t llc_sibling; +}; + +#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY +extern struct cpu_topology cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) +#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) +void init_cpu_topology(void); +void store_cpu_topology(unsigned int cpuid); +const struct cpumask *cpu_coregroup_mask(int cpu); +void update_siblings_masks(unsigned int cpu); +void remove_cpu_topology(unsigned int cpuid); +void reset_cpu_topology(void); +#endif + #endif /* _LINUX_ARCH_TOPOLOGY_H_ */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index f58e97446abc..90528f12bdfa 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -120,6 +120,10 @@ extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits); extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits); extern int __bitmap_equal(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); +extern bool __pure __bitmap_or_equal(const unsigned long *src1, + const unsigned long *src2, + const unsigned long *src3, + unsigned int nbits); extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits); extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, @@ -321,6 +325,25 @@ static inline int bitmap_equal(const unsigned long *src1, return __bitmap_equal(src1, src2, nbits); } +/** + * bitmap_or_equal - Check whether the or of two bitnaps is equal to a third + * @src1: Pointer to bitmap 1 + * @src2: Pointer to bitmap 2 will be or'ed with bitmap 1 + * @src3: Pointer to bitmap 3. 
Compare to the result of *@src1 | *@src2 + * + * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise + */ +static inline bool bitmap_or_equal(const unsigned long *src1, + const unsigned long *src2, + const unsigned long *src3, + unsigned int nbits) +{ + if (!small_const_nbits(nbits)) + return __bitmap_or_equal(src1, src2, src3, nbits); + + return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits)); +} + static inline int bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index feff3fe4467e..1b1fa1557e68 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -311,6 +311,7 @@ enum req_flag_bits { __REQ_RAHEAD, /* read ahead, can fail anytime */ __REQ_BACKGROUND, /* background IO */ __REQ_NOWAIT, /* Don't wait if request will block */ + __REQ_NOWAIT_INLINE, /* Return would-block error inline */ /* * When a shared kthread needs to issue a bio for a cgroup, doing * so synchronously can lead to priority inversions as the kthread @@ -345,6 +346,7 @@ enum req_flag_bits { #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) +#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE) #define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) @@ -418,12 +420,13 @@ static inline int op_stat_group(unsigned int op) typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U +#define BLK_QC_T_EAGAIN -2U #define BLK_QC_T_SHIFT 16 #define BLK_QC_T_INTERNAL (1U << 31) static inline bool blk_qc_t_valid(blk_qc_t cookie) { - return cookie != BLK_QC_T_NONE; + return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN; } static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) diff --git a/include/linux/bug.h b/include/linux/bug.h index fe5916550da8..f639bd0122f3 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -47,6 +47,11 @@ void generic_bug_clear_once(void); #else /* !CONFIG_GENERIC_BUG */ +static inline void *find_bug(unsigned long bugaddr) +{ + return NULL; +} + static inline enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs) { diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f6b048902d6c..3ba3e6da13a6 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -150,6 +150,7 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset, struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset, struct cgroup_subsys_state **dst_cssp); +void cgroup_enable_task_cg_lists(void); void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, struct css_task_iter *it); struct task_struct *css_task_iter_next(struct css_task_iter *it); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 2ae7604783dd..dce5521a9bf6 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -817,6 +817,7 @@ unsigned int clk_hw_get_num_parents(const struct clk_hw *hw); struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw); struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index); +int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent); unsigned int __clk_get_enable_count(struct clk *clk); unsigned long clk_hw_get_rate(const struct clk_hw *hw); unsigned long __clk_get_flags(struct clk *clk); diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 
bb9a0db89f1a..12ae4b87494e 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -256,7 +256,10 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) {return 0;} #endif -#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \ +#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \ + idx, \ + state, \ + is_retention) \ ({ \ int __ret = 0; \ \ @@ -268,7 +271,7 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) if (!is_retention) \ __ret = cpu_pm_enter(); \ if (!__ret) { \ - __ret = low_level_idle_enter(idx); \ + __ret = low_level_idle_enter(state); \ if (!is_retention) \ cpu_pm_exit(); \ } \ @@ -277,9 +280,15 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) }) #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0) + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0) #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1) + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1) + +#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0) + +#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1) #endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 21755471b1c3..b5a5a1ed9efd 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/threads.h> #include <linux/bitmap.h> +#include <linux/atomic.h> #include <linux/bug.h> /* Don't assign or return these: may not be this big! */ @@ -95,8 +96,21 @@ extern struct cpumask __cpu_active_mask; #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) +extern atomic_t __num_online_cpus; + #if NR_CPUS > 1 -#define num_online_cpus() cpumask_weight(cpu_online_mask) +/** + * num_online_cpus() - Read the number of online CPUs + * + * Despite the fact that __num_online_cpus is of type atomic_t, this + * interface gives only a momentary snapshot and is not protected against + * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held + * region. 
+ */ +static inline unsigned int num_online_cpus(void) +{ + return atomic_read(&__num_online_cpus); +} #define num_possible_cpus() cpumask_weight(cpu_possible_mask) #define num_present_cpus() cpumask_weight(cpu_present_mask) #define num_active_cpus() cpumask_weight(cpu_active_mask) @@ -115,6 +129,8 @@ extern struct cpumask __cpu_active_mask; #define cpu_active(cpu) ((cpu) == 0) #endif +extern cpumask_t cpus_booted_once_mask; + static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) { #ifdef CONFIG_DEBUG_PER_CPU_MAPS @@ -474,6 +490,20 @@ static inline bool cpumask_equal(const struct cpumask *src1p, } /** + * cpumask_or_equal - *src1p | *src2p == *src3p + * @src1p: the first input + * @src2p: the second input + * @src3p: the third input + */ +static inline bool cpumask_or_equal(const struct cpumask *src1p, + const struct cpumask *src2p, + const struct cpumask *src3p) +{ + return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p), + cpumask_bits(src3p), nr_cpumask_bits); +} + +/** * cpumask_intersects - (*src1p & *src2p) != 0 * @src1p: the first input * @src2p: the second input @@ -805,14 +835,7 @@ set_cpu_present(unsigned int cpu, bool present) cpumask_clear_cpu(cpu, &__cpu_present_mask); } -static inline void -set_cpu_online(unsigned int cpu, bool online) -{ - if (online) - cpumask_set_cpu(cpu, &__cpu_online_mask); - else - cpumask_clear_cpu(cpu, &__cpu_online_mask); -} +void set_cpu_online(unsigned int cpu, bool online); static inline void set_cpu_active(unsigned int cpu, bool active) diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 934633a05d20..04c20de66afc 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -40,14 +40,14 @@ static inline bool cpusets_enabled(void) static inline void cpuset_inc(void) { - static_branch_inc(&cpusets_pre_enable_key); - static_branch_inc(&cpusets_enabled_key); + static_branch_inc_cpuslocked(&cpusets_pre_enable_key); + static_branch_inc_cpuslocked(&cpusets_enabled_key); } static inline void cpuset_dec(void) { - static_branch_dec(&cpusets_enabled_key); - static_branch_dec(&cpusets_pre_enable_key); + static_branch_dec_cpuslocked(&cpusets_enabled_key); + static_branch_dec_cpuslocked(&cpusets_pre_enable_key); } extern int cpuset_init(void); @@ -55,6 +55,8 @@ extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); extern void cpuset_update_active_cpus(void); extern void cpuset_wait_for_hotplug(void); +extern void cpuset_read_lock(void); +extern void cpuset_read_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); extern void cpuset_cpus_allowed_fallback(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); @@ -176,6 +178,9 @@ static inline void cpuset_update_active_cpus(void) static inline void cpuset_wait_for_hotplug(void) { } +static inline void cpuset_read_lock(void) { } +static inline void cpuset_read_unlock(void) { } + static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) { diff --git a/include/linux/edac.h b/include/linux/edac.h index 342dabda9c7e..c19483b90079 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -440,7 +440,7 @@ struct dimm_info { char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ /* Memory location data */ - unsigned location[EDAC_MAX_LAYERS]; + unsigned int location[EDAC_MAX_LAYERS]; struct mem_ctl_info *mci; /* the parent */ @@ -451,7 +451,7 @@ struct dimm_info { u32 nr_pages; /* number of pages on this dimm */ - unsigned csrow, 
cschannel; /* Points to the old API data */ + unsigned int csrow, cschannel; /* Points to the old API data */ u16 smbios_handle; /* Handle for SMBIOS type 17 */ }; @@ -597,7 +597,7 @@ struct mem_ctl_info { unsigned long page); int mc_idx; struct csrow_info **csrows; - unsigned nr_csrows, num_cschannel; + unsigned int nr_csrows, num_cschannel; /* * Memory Controller hierarchy @@ -608,14 +608,14 @@ struct mem_ctl_info { * of the recent drivers enumerate memories per DIMM, instead. * When the memory controller is per rank, csbased is true. */ - unsigned n_layers; + unsigned int n_layers; struct edac_mc_layer *layers; bool csbased; /* * DIMM info. Will eventually remove the entire csrows_info some day */ - unsigned tot_dimms; + unsigned int tot_dimms; struct dimm_info **dimms; /* diff --git a/include/linux/efi.h b/include/linux/efi.h index f87fabea4a85..bd3837022307 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -692,6 +692,9 @@ void efi_native_runtime_setup(void); #define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25) #define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2) +/* OEM GUIDs */ +#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55) + typedef struct { efi_guid_t guid; u64 table; @@ -984,11 +987,9 @@ extern struct efi { unsigned long acpi20; /* ACPI table (ACPI 2.0) */ unsigned long smbios; /* SMBIOS table (32 bit entry point) */ unsigned long smbios3; /* SMBIOS table (64 bit entry point) */ - unsigned long sal_systab; /* SAL system table */ unsigned long boot_info; /* boot info table */ unsigned long hcdp; /* HCDP table */ unsigned long uga; /* UGA table */ - unsigned long uv_systab; /* UV system table */ unsigned long fw_vendor; /* fw_vendor */ unsigned long runtime; /* runtime table */ unsigned long config_table; /* config tables */ @@ -1211,8 +1212,6 @@ static inline bool efi_enabled(int feature) return test_bit(feature, &efi.flags) != 0; } extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); - -extern bool efi_is_table_address(unsigned long phys_addr); #else static inline bool efi_enabled(int feature) { @@ -1226,11 +1225,6 @@ efi_capsule_pending(int *reset_type) { return false; } - -static inline bool efi_is_table_address(unsigned long phys_addr) -{ - return false; -} #endif extern int efi_status_to_err(efi_status_t status); @@ -1722,6 +1716,8 @@ struct efi_tcg2_final_events_table { }; extern int efi_tpm_final_log_size; +extern unsigned long rci2_table_phys; + /* * efi_runtime_service() function identifiers. 
* "NONE" is used by efi_recover_from_page_fault() to check if the page diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h index 280c61ecbf20..635a95caf29f 100644 --- a/include/linux/error-injection.h +++ b/include/linux/error-injection.h @@ -2,16 +2,16 @@ #ifndef _LINUX_ERROR_INJECTION_H #define _LINUX_ERROR_INJECTION_H -#ifdef CONFIG_FUNCTION_ERROR_INJECTION +#include <linux/compiler.h> +#include <asm-generic/error-injection.h> -#include <asm/error-injection.h> +#ifdef CONFIG_FUNCTION_ERROR_INJECTION extern bool within_error_injection_list(unsigned long addr); extern int get_injectable_error_type(unsigned long addr); #else /* !CONFIG_FUNCTION_ERROR_INJECTION */ -#include <asm-generic/error-injection.h> static inline bool within_error_injection_list(unsigned long addr) { return false; diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h new file mode 100644 index 000000000000..7562099c9e46 --- /dev/null +++ b/include/linux/firmware/imx/dsp.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2019 NXP + * + * Header file for the DSP IPC implementation + */ + +#ifndef _IMX_DSP_IPC_H +#define _IMX_DSP_IPC_H + +#include <linux/device.h> +#include <linux/types.h> +#include <linux/mailbox_client.h> + +#define DSP_MU_CHAN_NUM 4 + +struct imx_dsp_chan { + struct imx_dsp_ipc *ipc; + struct mbox_client cl; + struct mbox_chan *ch; + char *name; + int idx; +}; + +struct imx_dsp_ops { + void (*handle_reply)(struct imx_dsp_ipc *ipc); + void (*handle_request)(struct imx_dsp_ipc *ipc); +}; + +struct imx_dsp_ipc { + /* Host <-> DSP communication uses 2 txdb and 2 rxdb channels */ + struct imx_dsp_chan chans[DSP_MU_CHAN_NUM]; + struct device *dev; + struct imx_dsp_ops *ops; + void *private_data; +}; + +static inline void imx_dsp_set_data(struct imx_dsp_ipc *ipc, void *data) +{ + if (!ipc) + return; + + ipc->private_data = data; +} + +static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc) +{ + if (!ipc) + return NULL; + + return ipc->private_data; +} + +#if IS_ENABLED(CONFIG_IMX_DSP) + +int imx_dsp_ring_doorbell(struct imx_dsp_ipc *dsp, unsigned int chan_idx); + +#else + +static inline int imx_dsp_ring_doorbell(struct imx_dsp_ipc *ipc, + unsigned int chan_idx) +{ + return -ENOTSUPP; +} + +#endif +#endif /* _IMX_DSP_IPC_H */ diff --git a/include/linux/gpio.h b/include/linux/gpio.h index f757a58191a6..2157717c2136 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -221,19 +221,6 @@ static inline int gpio_to_irq(unsigned gpio) return -EINVAL; } -static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, - unsigned int offset) -{ - WARN_ON(1); - return -EINVAL; -} - -static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, - unsigned int offset) -{ - WARN_ON(1); -} - static inline int irq_to_gpio(unsigned irq) { /* irq can never have been returned from gpio_to_irq() */ diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index a7f08fb0f865..b70af921c614 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -170,18 +170,8 @@ struct gpio_desc *gpio_to_desc(unsigned gpio); int desc_to_gpio(const struct gpio_desc *desc); /* Child properties interface */ -struct device_node; struct fwnode_handle; -struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label); -struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, - struct device_node *node, - const char 
*propname, int index, - enum gpiod_flags dflags, - const char *label); struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, const char *propname, int index, enum gpiod_flags dflags, @@ -530,29 +520,9 @@ static inline int desc_to_gpio(const struct gpio_desc *desc) } /* Child properties interface */ -struct device_node; struct fwnode_handle; static inline -struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label) -{ - return ERR_PTR(-ENOSYS); -} - -static inline -struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, - struct device_node *node, - const char *propname, int index, - enum gpiod_flags dflags, - const char *label) -{ - return ERR_PTR(-ENOSYS); -} - -static inline struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, const char *propname, int index, enum gpiod_flags dflags, @@ -584,6 +554,111 @@ struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev, flags, label); } +#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO) +struct device_node; + +struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); + +#else /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */ + +struct device_node; + +static inline +struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +#endif /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */ + +#ifdef CONFIG_GPIOLIB +struct device_node; + +struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, + struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); + +#else /* CONFIG_GPIOLIB */ + +struct device_node; + +static inline +struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, + struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +#endif /* CONFIG_GPIOLIB */ + +struct acpi_gpio_params { + unsigned int crs_entry_index; + unsigned int line_index; + bool active_low; +}; + +struct acpi_gpio_mapping { + const char *name; + const struct acpi_gpio_params *data; + unsigned int size; + +/* Ignore IoRestriction field */ +#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) +/* + * When ACPI GPIO mapping table is in use the index parameter inside it + * refers to the GPIO resource in _CRS method. That index has no + * distinction of actual type of the resource. When consumer wants to + * get GpioIo type explicitly, this quirk may be used. 
+ */ +#define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1) + + unsigned int quirks; +}; + +#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI) + +struct acpi_device; + +int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios); +void acpi_dev_remove_driver_gpios(struct acpi_device *adev); + +int devm_acpi_dev_add_driver_gpios(struct device *dev, + const struct acpi_gpio_mapping *gpios); +void devm_acpi_dev_remove_driver_gpios(struct device *dev); + +#else /* CONFIG_GPIOLIB && CONFIG_ACPI */ + +struct acpi_device; + +static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, + const struct acpi_gpio_mapping *gpios) +{ + return -ENXIO; +} +static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {} + +static inline int devm_acpi_dev_add_driver_gpios(struct device *dev, + const struct acpi_gpio_mapping *gpios) +{ + return -ENXIO; +} +static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {} + +#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */ + + #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) int gpiod_export(struct gpio_desc *desc, bool direction_may_change); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 6a0e420915a3..f8245d67f070 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -20,9 +20,8 @@ struct module; enum gpiod_flags; enum gpio_lookup_flags; -#ifdef CONFIG_GPIOLIB +struct gpio_chip; -#ifdef CONFIG_GPIOLIB_IRQCHIP /** * struct gpio_irq_chip - GPIO interrupt controller */ @@ -49,6 +48,84 @@ struct gpio_irq_chip { */ const struct irq_domain_ops *domain_ops; +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + /** + * @fwnode: + * + * Firmware node corresponding to this gpiochip/irqchip, necessary + * for hierarchical irqdomain support. + */ + struct fwnode_handle *fwnode; + + /** + * @parent_domain: + * + * If non-NULL, will be set as the parent of this GPIO interrupt + * controller's IRQ domain to establish a hierarchical interrupt + * domain. The presence of this will activate the hierarchical + * interrupt support. + */ + struct irq_domain *parent_domain; + + /** + * @child_to_parent_hwirq: + * + * This callback translates a child hardware IRQ offset to a parent + * hardware IRQ offset on a hierarchical interrupt chip. The child + * hardware IRQs correspond to the GPIO index 0..ngpio-1 (see the + * ngpio field of struct gpio_chip) and the corresponding parent + * hardware IRQ and type (such as IRQ_TYPE_*) shall be returned by + * the driver. The driver can calculate this from an offset or using + * a lookup table or whatever method is best for this chip. Return + * 0 on successful translation in the driver. + * + * If some ranges of hardware IRQs do not have a corresponding parent + * HWIRQ, return -EINVAL, but also make sure to fill in @valid_mask and + * @need_valid_mask to make these GPIO lines unavailable for + * translation. + */ + int (*child_to_parent_hwirq)(struct gpio_chip *chip, + unsigned int child_hwirq, + unsigned int child_type, + unsigned int *parent_hwirq, + unsigned int *parent_type); + + /** + * @populate_parent_fwspec: + * + * This optional callback populates the &struct irq_fwspec for the + * parent's IRQ domain. If this is not specified, then + * &gpiochip_populate_parent_fwspec_twocell will be used. A four-cell + * variant named &gpiochip_populate_parent_fwspec_fourcell is also + * available. 
+ */ + void (*populate_parent_fwspec)(struct gpio_chip *chip, + struct irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type); + + /** + * @child_offset_to_irq: + * + * This optional callback is used to translate the child's GPIO line + * offset on the GPIO chip to an IRQ number for the GPIO to_irq() + * callback. If this is not specified, then a default callback will be + * provided that returns the line offset. + */ + unsigned int (*child_offset_to_irq)(struct gpio_chip *chip, + unsigned int pin); + + /** + * @child_irq_domain_ops: + * + * The IRQ domain operations that will be used for this GPIO IRQ + * chip. If no operations are provided, then default callbacks will + * be populated to setup the IRQ hierarchy. Some drivers need to + * supply their own translate function. + */ + struct irq_domain_ops child_irq_domain_ops; +#endif + /** * @handler: * @@ -125,11 +202,17 @@ struct gpio_irq_chip { bool threaded; /** - * @need_valid_mask: - * - * If set core allocates @valid_mask with all bits set to one. + * @init_valid_mask: optional routine to initialize @valid_mask, to be + * used if not all GPIO lines are valid interrupts. Sometimes some + * lines just cannot fire interrupts, and this routine, when defined, + * is passed a bitmap in "valid_mask" and it will have ngpios + * bits from 0..(ngpios-1) set to "1" as in valid. The callback can + * then directly set some bits to "0" if they cannot be used for + * interrupts. */ - bool need_valid_mask; + void (*init_valid_mask)(struct gpio_chip *chip, + unsigned long *valid_mask, + unsigned int ngpios); /** * @valid_mask: @@ -161,7 +244,6 @@ struct gpio_irq_chip { */ void (*irq_disable)(struct irq_data *data); }; -#endif /* CONFIG_GPIOLIB_IRQCHIP */ /** * struct gpio_chip - abstract a GPIO controller @@ -282,7 +364,9 @@ struct gpio_chip { void (*dbg_show)(struct seq_file *s, struct gpio_chip *chip); - int (*init_valid_mask)(struct gpio_chip *chip); + int (*init_valid_mask)(struct gpio_chip *chip, + unsigned long *valid_mask, + unsigned int ngpios); int base; u16 ngpio; @@ -321,15 +405,6 @@ struct gpio_chip { #endif /* CONFIG_GPIOLIB_IRQCHIP */ /** - * @need_valid_mask: - * - * If set core allocates @valid_mask with all its values initialized - * with init_valid_mask() or set to one if init_valid_mask() is not - * defined - */ - bool need_valid_mask; - - /** * @valid_mask: * * If not %NULL holds bitmask of GPIOs which are valid to be used @@ -421,9 +496,6 @@ extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip, extern struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *chip, void *data)); -/* lock/unlock as IRQ */ -int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); -void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset); int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset); void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset); @@ -441,15 +513,40 @@ bool gpiochip_line_is_valid(const struct gpio_chip *chip, unsigned int offset); /* get driver data */ void *gpiochip_get_data(struct gpio_chip *chip); -struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); - struct bgpio_pdata { const char *label; int base; int ngpio; }; -#if IS_ENABLED(CONFIG_GPIO_GENERIC) +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY + +void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, + struct irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int 
parent_type); +void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, + struct irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type); + +#else + +static inline void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, + struct irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type) +{ +} + +static inline void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, + struct irq_fwspec *fwspec, + unsigned int parent_hwirq, + unsigned int parent_type) +{ +} + +#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ int bgpio_init(struct gpio_chip *gc, struct device *dev, unsigned long sz, void __iomem *dat, void __iomem *set, @@ -463,10 +560,6 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev, #define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */ #define BGPIOF_NO_OUTPUT BIT(5) /* only input */ -#endif /* CONFIG_GPIO_GENERIC */ - -#ifdef CONFIG_GPIOLIB_IRQCHIP - int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq); void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq); @@ -555,15 +648,11 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, } #endif /* CONFIG_LOCKDEP */ -#endif /* CONFIG_GPIOLIB_IRQCHIP */ - int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset); void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset); int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset, unsigned long config); -#ifdef CONFIG_PINCTRL - /** * struct gpio_pin_range - pin range controlled by a gpio chip * @node: list for maintaining set of pin ranges, used internally @@ -576,6 +665,8 @@ struct gpio_pin_range { struct pinctrl_gpio_range range; }; +#ifdef CONFIG_PINCTRL + int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins); @@ -586,8 +677,6 @@ void gpiochip_remove_pin_ranges(struct gpio_chip *chip); #else /* ! 
CONFIG_PINCTRL */ -struct pinctrl_dev; - static inline int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, @@ -619,6 +708,15 @@ void gpiochip_free_own_desc(struct gpio_desc *desc); void devprop_gpiochip_set_names(struct gpio_chip *chip, const struct fwnode_handle *fwnode); +#ifdef CONFIG_GPIOLIB + +/* lock/unlock as IRQ */ +int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset); +void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); + + +struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); + #else /* CONFIG_GPIOLIB */ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) @@ -628,6 +726,18 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) return ERR_PTR(-ENODEV); } +static inline int gpiochip_lock_as_irq(struct gpio_chip *chip, + unsigned int offset) +{ + WARN_ON(1); + return -EINVAL; +} + +static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip, + unsigned int offset) +{ + WARN_ON(1); +} #endif /* CONFIG_GPIOLIB */ -#endif +#endif /* __LINUX_GPIO_DRIVER_H */ diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 4971100a8cab..1b9a51a1bccb 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -32,12 +32,15 @@ struct hrtimer_cpu_base; * when starting the timer) * HRTIMER_MODE_SOFT - Timer callback function will be executed in * soft irq context + * HRTIMER_MODE_HARD - Timer callback function will be executed in + * hard irq context even on PREEMPT_RT. */ enum hrtimer_mode { HRTIMER_MODE_ABS = 0x00, HRTIMER_MODE_REL = 0x01, HRTIMER_MODE_PINNED = 0x02, HRTIMER_MODE_SOFT = 0x04, + HRTIMER_MODE_HARD = 0x08, HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED, HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED, @@ -48,6 +51,11 @@ enum hrtimer_mode { HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT, HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT, + HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD, + HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD, + + HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD, + HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD, }; /* @@ -101,6 +109,8 @@ enum hrtimer_restart { * @state: state information (See bit values above) * @is_rel: Set if the timer was armed relative * @is_soft: Set if hrtimer will be expired in soft interrupt context. + * @is_hard: Set if hrtimer will be expired in hard interrupt context + * even on RT. * * The hrtimer structure must be initialized by hrtimer_init() */ @@ -112,6 +122,7 @@ struct hrtimer { u8 state; u8 is_rel; u8 is_soft; + u8 is_hard; }; /** @@ -183,6 +194,10 @@ enum hrtimer_base_type { * @nr_retries: Total number of hrtimer interrupt retries * @nr_hangs: Total number of hrtimer interrupt hangs * @max_hang_time: Maximum time spent in hrtimer_interrupt + * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are + * expired + * @timer_waiters: A hrtimer_cancel() invocation waits for the timer + * callback to finish. 
* @expires_next: absolute time of the next event, is required for remote * hrtimer enqueue; it is the total first expiry time (hard * and soft hrtimer are taken into account) @@ -210,6 +225,10 @@ struct hrtimer_cpu_base { unsigned short nr_hangs; unsigned int max_hang_time; #endif +#ifdef CONFIG_PREEMPT_RT + spinlock_t softirq_expiry_lock; + atomic_t timer_waiters; +#endif ktime_t expires_next; struct hrtimer *next_timer; ktime_t softirq_expires_next; @@ -341,16 +360,29 @@ extern void hrtimers_resume(void); DECLARE_PER_CPU(struct tick_device, tick_cpu_device); +#ifdef CONFIG_PREEMPT_RT +void hrtimer_cancel_wait_running(const struct hrtimer *timer); +#else +static inline void hrtimer_cancel_wait_running(struct hrtimer *timer) +{ + cpu_relax(); +} +#endif /* Exported timer functions: */ /* Initialize timers: */ extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); +extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, + enum hrtimer_mode mode); #ifdef CONFIG_DEBUG_OBJECTS_TIMERS extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); +extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, + clockid_t clock_id, + enum hrtimer_mode mode); extern void destroy_hrtimer_on_stack(struct hrtimer *timer); #else @@ -360,6 +392,14 @@ static inline void hrtimer_init_on_stack(struct hrtimer *timer, { hrtimer_init(timer, which_clock, mode); } + +static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, + clockid_t clock_id, + enum hrtimer_mode mode) +{ + hrtimer_init_sleeper(sl, clock_id, mode); +} + static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } #endif @@ -395,6 +435,9 @@ static inline void hrtimer_start_expires(struct hrtimer *timer, hrtimer_start_range_ns(timer, soft, delta, mode); } +void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl, + enum hrtimer_mode mode); + static inline void hrtimer_restart(struct hrtimer *timer) { hrtimer_start_expires(timer, HRTIMER_MODE_ABS); @@ -463,11 +506,8 @@ extern long hrtimer_nanosleep(const struct timespec64 *rqtp, const enum hrtimer_mode mode, const clockid_t clockid); -extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, - struct task_struct *tsk); - extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, - const enum hrtimer_mode mode); + const enum hrtimer_mode mode); extern int schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, const enum hrtimer_mode mode, diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h index 5ecb055fd375..de102e4418ab 100644 --- a/include/linux/i3c/device.h +++ b/include/linux/i3c/device.h @@ -188,6 +188,10 @@ static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv) struct device *i3cdev_to_dev(struct i3c_device *i3cdev); struct i3c_device *dev_to_i3cdev(struct device *dev); +const struct i3c_device_id * +i3c_device_match_id(struct i3c_device *i3cdev, + const struct i3c_device_id *id_table); + static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev, void *data) { diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index 1f08fa8d69d2..9cb39d901cd5 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -71,6 +71,9 @@ struct i2c_dev_boardinfo { * @common: common part of the I2C device descriptor * @boardinfo: pointer to the boardinfo attached to this I2C device * @dev: I2C device object registered to the I2C framework + * @addr: I2C device address + * 
@lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about + * the I2C device limitations * * Each I2C device connected on the bus will have an i2c_dev_desc. * This object is created by the core and later attached to the controller @@ -84,6 +87,8 @@ struct i2c_dev_desc { struct i3c_i2c_dev_desc common; const struct i2c_dev_boardinfo *boardinfo; struct i2c_client *dev; + u16 addr; + u8 lvr; }; /** diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 6049baa5b8bc..2c620d7ac432 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -36,17 +36,6 @@ extern struct cred init_cred; #define INIT_PREV_CPUTIME(x) #endif -#ifdef CONFIG_POSIX_TIMERS -#define INIT_CPU_TIMERS(s) \ - .cpu_timers = { \ - LIST_HEAD_INIT(s.cpu_timers[0]), \ - LIST_HEAD_INIT(s.cpu_timers[1]), \ - LIST_HEAD_INIT(s.cpu_timers[2]), \ - }, -#else -#define INIT_CPU_TIMERS(s) -#endif - #define INIT_TASK_COMM "swapper" /* Attach to the init_task data structure for proper alignment */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 4fc6454f7ebb..ed11ef594378 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -272,6 +272,8 @@ #define dma_frcd_type(d) ((d >> 30) & 1) #define dma_frcd_fault_reason(c) (c & 0xff) #define dma_frcd_source_id(c) (c & 0xffff) +#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff) +#define dma_frcd_pasid_present(c) (((c) >> 31) & 1) /* low 64 bit */ #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 5b8328a99b2a..07b527dca996 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -472,7 +472,11 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool state); #ifdef CONFIG_IRQ_FORCED_THREADING +# ifdef CONFIG_PREEMPT_RT +# define force_irqthreads (true) +# else extern bool force_irqthreads; +# endif #else #define force_irqthreads (0) #endif diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index b5a450a3bb47..ec7a13405f10 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -1,7 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IO_PGTABLE_H #define __IO_PGTABLE_H + #include <linux/bitops.h> +#include <linux/iommu.h> /* * Public API for use by IOMMU drivers @@ -17,22 +19,31 @@ enum io_pgtable_fmt { }; /** - * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. + * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management. * - * @tlb_flush_all: Synchronously invalidate the entire TLB context. - * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. - * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and - * any corresponding page table updates are visible to the - * IOMMU. + * @tlb_flush_all: Synchronously invalidate the entire TLB context. + * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state + * (sometimes referred to as the "walk cache") for a virtual + * address range. + * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual + * address range. + * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a + * single page. IOMMUs that cannot batch TLB invalidation + * operations efficiently will typically issue them here, but + * others may decide to update the iommu_iotlb_gather structure + * and defer the invalidation until iommu_tlb_sync() instead. 
* * Note that these can all be called in atomic context and must therefore * not block. */ -struct iommu_gather_ops { +struct iommu_flush_ops { void (*tlb_flush_all)(void *cookie); - void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule, - bool leaf, void *cookie); - void (*tlb_sync)(void *cookie); + void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule, + void *cookie); + void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule, + void *cookie); + void (*tlb_add_page)(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t granule, void *cookie); }; /** @@ -65,10 +76,9 @@ struct io_pgtable_cfg { * (unmapped) entries but the hardware might do so anyway, perform * TLB maintenance when mapping as well as when unmapping. * - * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all - * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit - * when the SoC is in "4GB mode" and they can only access the high - * remap of DRAM (0x1_00000000 to 0x1_ffffffff). + * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend + * to support up to 34 bits PA where the bit32 and bit33 are + * encoded in the bit9 and bit4 of the PTE respectively. * * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs * on unmap, for DMA domains using the flush queue mechanism for @@ -77,14 +87,14 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) - #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3) + #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3) #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; unsigned int oas; bool coherent_walk; - const struct iommu_gather_ops *tlb; + const struct iommu_flush_ops *tlb; struct device *iommu_dev; /* Low-level data specific to the table format */ @@ -128,7 +138,7 @@ struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int prot); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, - size_t size); + size_t size, struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); }; @@ -184,15 +194,27 @@ static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) iop->cfg.tlb->tlb_flush_all(iop->cookie); } -static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, - unsigned long iova, size_t size, size_t granule, bool leaf) +static inline void +io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova, + size_t size, size_t granule) +{ + iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); +} + +static inline void +io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova, + size_t size, size_t granule) { - iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); + iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie); } -static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) +static inline void +io_pgtable_tlb_add_page(struct io_pgtable *iop, + struct iommu_iotlb_gather * gather, unsigned long iova, + size_t granule) { - iop->cfg.tlb->tlb_sync(iop->cookie); + if (iop->cfg.tlb->tlb_add_page) + iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie); } /** diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h deleted file mode 100644 index 51e2b9fb6372..000000000000 --- a/include/linux/ioc4.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * This file 
is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved. - */ - -#ifndef _LINUX_IOC4_H -#define _LINUX_IOC4_H - -#include <linux/interrupt.h> - -/*************** - * Definitions * - ***************/ - -/* Miscellaneous values inherent to hardware */ - -#define IOC4_EXTINT_COUNT_DIVISOR 520 /* PCI clocks per COUNT tick */ - -/*********************************** - * Structures needed by subdrivers * - ***********************************/ - -/* This structure fully describes the IOC4 miscellaneous registers which - * appear at bar[0]+0x00000 through bar[0]+0x0005c. The corresponding - * PCI resource is managed by the main IOC4 driver because it contains - * registers of interest to many different IOC4 subdrivers. - */ -struct ioc4_misc_regs { - /* Miscellaneous IOC4 registers */ - union ioc4_pci_err_addr_l { - uint32_t raw; - struct { - uint32_t valid:1; /* Address captured */ - uint32_t master_id:4; /* Unit causing error - * 0/1: Serial port 0 TX/RX - * 2/3: Serial port 1 TX/RX - * 4/5: Serial port 2 TX/RX - * 6/7: Serial port 3 TX/RX - * 8: ATA/ATAPI - * 9-15: Undefined - */ - uint32_t mul_err:1; /* Multiple errors occurred */ - uint32_t addr:26; /* Bits 31-6 of error addr */ - } fields; - } pci_err_addr_l; - uint32_t pci_err_addr_h; /* Bits 63-32 of error addr */ - union ioc4_sio_int { - uint32_t raw; - struct { - uint8_t tx_mt:1; /* TX ring buffer empty */ - uint8_t rx_full:1; /* RX ring buffer full */ - uint8_t rx_high:1; /* RX high-water exceeded */ - uint8_t rx_timer:1; /* RX timer has triggered */ - uint8_t delta_dcd:1; /* DELTA_DCD seen */ - uint8_t delta_cts:1; /* DELTA_CTS seen */ - uint8_t intr_pass:1; /* Interrupt pass-through */ - uint8_t tx_explicit:1; /* TX, MCW, or delay complete */ - } fields[4]; - } sio_ir; /* Serial interrupt state */ - union ioc4_other_int { - uint32_t raw; - struct { - uint32_t ata_int:1; /* ATA port passthru */ - uint32_t ata_memerr:1; /* ATA halted by mem error */ - uint32_t memerr:4; /* Serial halted by mem err */ - uint32_t kbd_int:1; /* kbd/mouse intr asserted */ - uint32_t reserved:16; /* zero */ - uint32_t rt_int:1; /* INT_OUT section latch */ - uint32_t gen_int:8; /* Intr. from generic pins */ - } fields; - } other_ir; /* Other interrupt state */ - union ioc4_sio_int sio_ies; /* Serial interrupt enable set */ - union ioc4_other_int other_ies; /* Other interrupt enable set */ - union ioc4_sio_int sio_iec; /* Serial interrupt enable clear */ - union ioc4_other_int other_iec; /* Other interrupt enable clear */ - union ioc4_sio_cr { - uint32_t raw; - struct { - uint32_t cmd_pulse:4; /* Bytebus strobe width */ - uint32_t arb_diag:3; /* PCI bus requester */ - uint32_t sio_diag_idle:1; /* Active ser req? */ - uint32_t ata_diag_idle:1; /* Active ATA req? 
*/ - uint32_t ata_diag_active:1; /* ATA req is winner */ - uint32_t reserved:22; /* zero */ - } fields; - } sio_cr; - uint32_t unused1; - union ioc4_int_out { - uint32_t raw; - struct { - uint32_t count:16; /* Period control */ - uint32_t mode:3; /* Output signal shape */ - uint32_t reserved:11; /* zero */ - uint32_t diag:1; /* Timebase control */ - uint32_t int_out:1; /* Current value */ - } fields; - } int_out; /* External interrupt output control */ - uint32_t unused2; - union ioc4_gpcr { - uint32_t raw; - struct { - uint32_t dir:8; /* Pin direction */ - uint32_t edge:8; /* Edge/level mode */ - uint32_t reserved1:4; /* zero */ - uint32_t int_out_en:1; /* INT_OUT enable */ - uint32_t reserved2:11; /* zero */ - } fields; - } gpcr_s; /* Generic PIO control set */ - union ioc4_gpcr gpcr_c; /* Generic PIO control clear */ - union ioc4_gpdr { - uint32_t raw; - struct { - uint32_t gen_pin:8; /* State of pins */ - uint32_t reserved:24; - } fields; - } gpdr; /* Generic PIO data */ - uint32_t unused3; - union ioc4_gppr { - uint32_t raw; - struct { - uint32_t gen_pin:1; /* Single pin state */ - uint32_t reserved:31; - } fields; - } gppr[8]; /* Generic PIO pins */ -}; - -/* Masks for GPCR DIR pins */ -#define IOC4_GPCR_DIR_0 0x01 /* External interrupt output */ -#define IOC4_GPCR_DIR_1 0x02 /* External interrupt input */ -#define IOC4_GPCR_DIR_2 0x04 -#define IOC4_GPCR_DIR_3 0x08 /* Keyboard/mouse presence */ -#define IOC4_GPCR_DIR_4 0x10 /* Ser. port 0 xcvr select (0=232, 1=422) */ -#define IOC4_GPCR_DIR_5 0x20 /* Ser. port 1 xcvr select (0=232, 1=422) */ -#define IOC4_GPCR_DIR_6 0x40 /* Ser. port 2 xcvr select (0=232, 1=422) */ -#define IOC4_GPCR_DIR_7 0x80 /* Ser. port 3 xcvr select (0=232, 1=422) */ - -/* Masks for GPCR EDGE pins */ -#define IOC4_GPCR_EDGE_0 0x01 -#define IOC4_GPCR_EDGE_1 0x02 /* External interrupt input */ -#define IOC4_GPCR_EDGE_2 0x04 -#define IOC4_GPCR_EDGE_3 0x08 -#define IOC4_GPCR_EDGE_4 0x10 -#define IOC4_GPCR_EDGE_5 0x20 -#define IOC4_GPCR_EDGE_6 0x40 -#define IOC4_GPCR_EDGE_7 0x80 - -#define IOC4_VARIANT_IO9 0x0900 -#define IOC4_VARIANT_PCI_RT 0x0901 -#define IOC4_VARIANT_IO10 0x1000 - -/* One of these per IOC4 */ -struct ioc4_driver_data { - struct list_head idd_list; - unsigned long idd_bar0; - struct pci_dev *idd_pdev; - const struct pci_device_id *idd_pci_id; - struct ioc4_misc_regs __iomem *idd_misc_regs; - unsigned long count_period; - void *idd_serial_data; - unsigned int idd_variant; -}; - -/* One per submodule */ -struct ioc4_submodule { - struct list_head is_list; - char *is_name; - struct module *is_owner; - int (*is_probe) (struct ioc4_driver_data *); - int (*is_remove) (struct ioc4_driver_data *); -}; - -#define IOC4_NUM_CARDS 8 /* max cards per partition */ - -/********************************** - * Functions needed by submodules * - **********************************/ - -extern int ioc4_register_submodule(struct ioc4_submodule *); -extern void ioc4_unregister_submodule(struct ioc4_submodule *); - -#endif /* _LINUX_IOC4_H */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index fdc355ccc570..29bac5345563 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -192,6 +192,23 @@ struct iommu_sva_ops { #ifdef CONFIG_IOMMU_API /** + * struct iommu_iotlb_gather - Range information for a pending IOTLB flush + * + * @start: IOVA representing the start of the range to be flushed + * @end: IOVA representing the end of the range to be flushed (exclusive) + * @pgsize: The interval at which to perform the flush + * + * This structure is intended 
to be updated by multiple calls to the + * ->unmap() function in struct iommu_ops before eventually being passed + * into ->iotlb_sync(). + */ +struct iommu_iotlb_gather { + unsigned long start; + unsigned long end; + size_t pgsize; +}; + +/** * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @domain_alloc: allocate iommu domain @@ -201,7 +218,6 @@ struct iommu_sva_ops { * @map: map a physically contiguous memory region to an iommu domain * @unmap: unmap a physically contiguous memory region from an iommu domain * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain - * @iotlb_range_add: Add a given iova range to the flush queue for this domain * @iotlb_sync_map: Sync mappings created recently using @map to the hardware * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush * queue @@ -242,12 +258,11 @@ struct iommu_ops { int (*map)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size); + size_t size, struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); - void (*iotlb_range_add)(struct iommu_domain *domain, - unsigned long iova, size_t size); void (*iotlb_sync_map)(struct iommu_domain *domain); - void (*iotlb_sync)(struct iommu_domain *domain); + void (*iotlb_sync)(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); int (*add_device)(struct device *dev); void (*remove_device)(struct device *dev); @@ -378,6 +393,13 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev) return (struct iommu_device *)dev_get_drvdata(dev); } +static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) +{ + *gather = (struct iommu_iotlb_gather) { + .start = ULONG_MAX, + }; +} + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ @@ -402,7 +424,8 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size); extern size_t iommu_unmap_fast(struct iommu_domain *domain, - unsigned long iova, size_t size); + unsigned long iova, size_t size, + struct iommu_iotlb_gather *iotlb_gather); extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg,unsigned int nents, int prot); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); @@ -413,6 +436,9 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern int iommu_request_dm_for_dev(struct device *dev); extern int iommu_request_dma_domain_for_dev(struct device *dev); +extern void iommu_set_default_passthrough(bool cmd_line); +extern void iommu_set_default_translated(bool cmd_line); +extern bool iommu_default_passthrough(void); extern struct iommu_resv_region * iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, enum iommu_resv_type type); @@ -476,17 +502,38 @@ static inline void iommu_flush_tlb_all(struct iommu_domain *domain) domain->ops->flush_iotlb_all(domain); } -static inline void iommu_tlb_range_add(struct iommu_domain *domain, - unsigned long iova, size_t size) +static inline 
void iommu_tlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather) { - if (domain->ops->iotlb_range_add) - domain->ops->iotlb_range_add(domain, iova, size); + if (domain->ops->iotlb_sync) + domain->ops->iotlb_sync(domain, iotlb_gather); + + iommu_iotlb_gather_init(iotlb_gather); } -static inline void iommu_tlb_sync(struct iommu_domain *domain) +static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather, + unsigned long iova, size_t size) { - if (domain->ops->iotlb_sync) - domain->ops->iotlb_sync(domain); + unsigned long start = iova, end = start + size; + + /* + * If the new page is disjoint from the current range or is mapped at + * a different granularity, then sync the TLB so that the gather + * structure can be rewritten. + */ + if (gather->pgsize != size || + end < gather->start || start > gather->end) { + if (gather->pgsize) + iommu_tlb_sync(domain, gather); + gather->pgsize = size; + } + + if (gather->end < end) + gather->end = end; + + if (gather->start > start) + gather->start = start; } /* PCI device grouping function */ @@ -567,6 +614,7 @@ struct iommu_group {}; struct iommu_fwspec {}; struct iommu_device {}; struct iommu_fault_param {}; +struct iommu_iotlb_gather {}; static inline bool iommu_present(struct bus_type *bus) { @@ -621,7 +669,8 @@ static inline size_t iommu_unmap(struct iommu_domain *domain, } static inline size_t iommu_unmap_fast(struct iommu_domain *domain, - unsigned long iova, int gfp_order) + unsigned long iova, int gfp_order, + struct iommu_iotlb_gather *iotlb_gather) { return 0; } @@ -637,12 +686,8 @@ static inline void iommu_flush_tlb_all(struct iommu_domain *domain) { } -static inline void iommu_tlb_range_add(struct iommu_domain *domain, - unsigned long iova, size_t size) -{ -} - -static inline void iommu_tlb_sync(struct iommu_domain *domain) +static inline void iommu_tlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather) { } @@ -694,6 +739,19 @@ static inline int iommu_request_dma_domain_for_dev(struct device *dev) return -ENODEV; } +static inline void iommu_set_default_passthrough(bool cmd_line) +{ +} + +static inline void iommu_set_default_translated(bool cmd_line) +{ +} + +static inline bool iommu_default_passthrough(void) +{ + return true; +} + static inline int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) { @@ -827,6 +885,16 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev) return NULL; } +static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) +{ +} + +static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather, + unsigned long iova, size_t size) +{ +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 67c4b9806d43..5cc10cf7cb3e 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -30,10 +30,22 @@ #define GICD_ICFGR 0x0C00 #define GICD_IGRPMODR 0x0D00 #define GICD_NSACR 0x0E00 +#define GICD_IGROUPRnE 0x1000 +#define GICD_ISENABLERnE 0x1200 +#define GICD_ICENABLERnE 0x1400 +#define GICD_ISPENDRnE 0x1600 +#define GICD_ICPENDRnE 0x1800 +#define GICD_ISACTIVERnE 0x1A00 +#define GICD_ICACTIVERnE 0x1C00 +#define GICD_IPRIORITYRnE 0x2000 +#define GICD_ICFGRnE 0x3000 #define GICD_IROUTER 0x6000 +#define GICD_IROUTERnE 0x8000 #define GICD_IDREGS 0xFFD0 #define GICD_PIDR2 
0xFFE8 +#define ESPI_BASE_INTID 4096 + /* * Those registers are actually from GICv2, but the spec demands that they * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). @@ -69,10 +81,13 @@ #define GICD_TYPER_RSS (1U << 26) #define GICD_TYPER_LPIS (1U << 17) #define GICD_TYPER_MBIS (1U << 16) +#define GICD_TYPER_ESPI (1U << 8) #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) #define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) -#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_ESPIS(typer) \ + (((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0) #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) @@ -109,6 +124,18 @@ #define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) +#define EPPI_BASE_INTID 1056 + +#define GICR_TYPER_NR_PPIS(r) \ + ({ \ + unsigned int __ppinum = ((r) >> 27) & 0x1f; \ + unsigned int __nr_ppis = 16; \ + if (__ppinum == 1 || __ppinum == 2) \ + __nr_ppis += __ppinum * 32; \ + \ + __nr_ppis; \ + }) + #define GICR_WAKER_ProcessorSleep (1U << 1) #define GICR_WAKER_ChildrenAsleep (1U << 2) @@ -469,6 +496,7 @@ #define ICC_CTLR_EL1_A3V_SHIFT 15 #define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) #define ICC_CTLR_EL1_RSS (0x1 << 18) +#define ICC_CTLR_EL1_ExtRange (0x1 << 19) #define ICC_PMR_EL1_SHIFT 0 #define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) #define ICC_BPR0_EL1_SHIFT 0 diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h index a783ddb58444..2f6ae7551748 100644 --- a/include/linux/irqchip/irq-partition-percpu.h +++ b/include/linux/irqchip/irq-partition-percpu.h @@ -4,6 +4,9 @@ * Author: Marc Zyngier <marc.zyngier@arm.com> */ +#ifndef __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H +#define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H + #include <linux/fwnode.h> #include <linux/cpumask.h> #include <linux/irqdomain.h> @@ -46,3 +49,5 @@ struct irq_domain *partition_get_domain(struct partition_desc *dsc) return NULL; } #endif + +#endif /* __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 07ec8b390161..583e7abd07f9 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -220,7 +220,7 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) #ifdef CONFIG_IRQ_DOMAIN struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, - const char *name, void *data); + const char *name, phys_addr_t *pa); enum { IRQCHIP_FWNODE_REAL, @@ -241,9 +241,9 @@ struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id) NULL); } -static inline struct fwnode_handle *irq_domain_alloc_fwnode(void *data) +static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa) { - return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, data); + return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa); } void irq_domain_free_fwnode(struct fwnode_handle *fwnode); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index b9b1bc5f9669..f0b809258ed3 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -216,6 +216,29 @@ extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map, void **addr, unsigned long *sz); #endif /* CONFIG_KEXEC_FILE */ +#ifdef CONFIG_KEXEC_ELF +struct kexec_elf_info { + /* + * Where the ELF binary contents are kept. 
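/*
 * A minimal sketch of the probe/build/load/free flow declared just below,
 * assuming an arch image loader; the kexec_buf initialisation is reduced to
 * the fields needed here and elf_load_sketch() is an illustrative name.
 */
#include <linux/err.h>
#include <linux/kexec.h>

static void *elf_load_sketch(struct kimage *image, char *kernel_buf,
			     unsigned long kernel_len)
{
	struct kexec_elf_info elf_info;
	struct elfhdr ehdr;
	struct kexec_buf kbuf = { .image = image, .buf_max = ULONG_MAX };
	unsigned long lowest_load_addr;
	int ret;

	ret = kexec_elf_probe(kernel_buf, kernel_len);	/* is this an ELF we handle? */
	if (ret)
		return ERR_PTR(ret);

	ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info);
	if (ret)
		return ERR_PTR(ret);

	ret = kexec_elf_load(image, &ehdr, &elf_info, &kbuf, &lowest_load_addr);

	kexec_free_elf_info(&elf_info);	/* elf_info does not own kernel_buf */
	return ret ? ERR_PTR(ret) : NULL;
}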
+ * Memory managed by the user of the struct. + */ + const char *buffer; + + const struct elfhdr *ehdr; + const struct elf_phdr *proghdrs; +}; + +int kexec_build_elf_info(const char *buf, size_t len, struct elfhdr *ehdr, + struct kexec_elf_info *elf_info); + +int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, + struct kexec_elf_info *elf_info, + struct kexec_buf *kbuf, + unsigned long *lowest_load_addr); + +void kexec_free_elf_info(struct kexec_elf_info *elf_info); +int kexec_elf_probe(const char *buf, unsigned long len); +#endif struct kimage { kimage_entry_t head; kimage_entry_t *entry; diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 0b0d7259276d..b8a835fd611b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -66,10 +66,7 @@ struct lock_class_key { extern struct lock_class_key __lockdep_no_validate__; -struct lock_trace { - unsigned int nr_entries; - unsigned int offset; -}; +struct lock_trace; #define LOCKSTAT_POINTS 4 @@ -97,7 +94,7 @@ struct lock_class { */ struct list_head locks_after, locks_before; - struct lockdep_subclass_key *key; + const struct lockdep_subclass_key *key; unsigned int subclass; unsigned int dep_gen_id; @@ -105,7 +102,7 @@ struct lock_class { * IRQ/softirq usage tracking bits: */ unsigned long usage_mask; - struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES]; + const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES]; /* * Generation counter, when doing certain classes of graph walking, @@ -193,7 +190,7 @@ struct lock_list { struct list_head entry; struct lock_class *class; struct lock_class *links_to; - struct lock_trace trace; + const struct lock_trace *trace; int distance; /* diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h new file mode 100644 index 000000000000..490db6886dcc --- /dev/null +++ b/include/linux/moxtet.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Turris Mox module configuration bus driver + * + * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz> + */ + +#ifndef __LINUX_MOXTET_H +#define __LINUX_MOXTET_H + +#include <linux/device.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/mutex.h> + +#define TURRIS_MOX_MAX_MODULES 10 + +enum turris_mox_cpu_module_id { + TURRIS_MOX_CPU_ID_EMMC = 0x00, + TURRIS_MOX_CPU_ID_SD = 0x10, +}; + +enum turris_mox_module_id { + TURRIS_MOX_MODULE_FIRST = 0x01, + + TURRIS_MOX_MODULE_SFP = 0x01, + TURRIS_MOX_MODULE_PCI = 0x02, + TURRIS_MOX_MODULE_TOPAZ = 0x03, + TURRIS_MOX_MODULE_PERIDOT = 0x04, + TURRIS_MOX_MODULE_USB3 = 0x05, + TURRIS_MOX_MODULE_PCI_BRIDGE = 0x06, + + TURRIS_MOX_MODULE_LAST = 0x06, +}; + +#define MOXTET_NIRQS 16 + +extern struct bus_type moxtet_type; + +struct moxtet { + struct device *dev; + struct mutex lock; + u8 modules[TURRIS_MOX_MAX_MODULES]; + int count; + u8 tx[TURRIS_MOX_MAX_MODULES]; + int dev_irq; + struct { + struct irq_domain *domain; + struct irq_chip chip; + unsigned long masked, exists; + struct moxtet_irqpos { + u8 idx; + u8 bit; + } position[MOXTET_NIRQS]; + } irq; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root; +#endif +}; + +struct moxtet_driver { + const enum turris_mox_module_id *id_table; + struct device_driver driver; +}; + +static inline struct moxtet_driver * +to_moxtet_driver(struct device_driver *drv) +{ + if (!drv) + return NULL; + return container_of(drv, struct moxtet_driver, driver); +} + +extern int __moxtet_register_driver(struct module *owner, + struct moxtet_driver *mdrv); + +static inline void moxtet_unregister_driver(struct moxtet_driver 
*mdrv) +{ + if (mdrv) + driver_unregister(&mdrv->driver); +} + +#define moxtet_register_driver(driver) \ + __moxtet_register_driver(THIS_MODULE, driver) + +#define module_moxtet_driver(__moxtet_driver) \ + module_driver(__moxtet_driver, moxtet_register_driver, \ + moxtet_unregister_driver) + +struct moxtet_device { + struct device dev; + struct moxtet *moxtet; + enum turris_mox_module_id id; + unsigned int idx; +}; + +extern int moxtet_device_read(struct device *dev); +extern int moxtet_device_write(struct device *dev, u8 val); +extern int moxtet_device_written(struct device *dev); + +static inline struct moxtet_device * +to_moxtet_device(struct device *dev) +{ + if (!dev) + return NULL; + return container_of(dev, struct moxtet_device, dev); +} + +#endif /* __LINUX_MOXTET_H */ diff --git a/include/linux/mutex.h b/include/linux/mutex.h index dcd03fee6e01..aca8f36dfac9 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -66,16 +66,6 @@ struct mutex { }; /* - * Internal helper function; C doesn't allow us to hide it :/ - * - * DO NOT USE (outside of mutex code). - */ -static inline struct task_struct *__mutex_owner(struct mutex *lock) -{ - return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07); -} - -/* * This is the control structure for tasks blocked on mutex, * which resides on the blocked task's kernel stack: */ @@ -144,10 +134,7 @@ extern void __mutex_init(struct mutex *lock, const char *name, * * Returns true if the mutex is locked, false if unlocked. */ -static inline bool mutex_is_locked(struct mutex *lock) -{ - return __mutex_owner(lock) != NULL; -} +extern bool mutex_is_locked(struct mutex *lock); /* * See kernel/locking/mutex.c for detailed documentation of these APIs. @@ -220,13 +207,7 @@ enum mutex_trylock_recursive_enum { * - MUTEX_TRYLOCK_SUCCESS - lock acquired, * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock. */ -static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum -mutex_trylock_recursive(struct mutex *lock) -{ - if (unlikely(__mutex_owner(lock) == current)) - return MUTEX_TRYLOCK_RECURSIVE; - - return mutex_trylock(lock); -} +extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum +mutex_trylock_recursive(struct mutex *lock); #endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index f9737dea9d1f..16967390a3fe 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -61,10 +61,6 @@ static inline int of_mm_gpiochip_add(struct device_node *np, } extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); -extern int of_gpio_simple_xlate(struct gpio_chip *gc, - const struct of_phandle_args *gpiospec, - u32 *flags); - #else /* CONFIG_OF_GPIO */ /* Drivers may not strictly depend on the GPIO support, so let them link. 
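/*
 * A minimal sketch of a Moxtet sub-driver using the bus API above; it
 * assumes the bus binds through the embedded device_driver ->probe() and
 * that the id_table is zero-terminated, and all names are illustrative.
 */
static const enum turris_mox_module_id example_ids[] = {
	TURRIS_MOX_MODULE_SFP,
	0,					/* assumed sentinel */
};

static int example_probe(struct device *dev)
{
	struct moxtet_device *mdev = to_moxtet_device(dev);
	int val = moxtet_device_read(dev);	/* module's input byte */

	if (val < 0)
		return val;

	dev_info(dev, "module %#x at position %u read %#x\n",
		 mdev->id, mdev->idx, val);
	return 0;
}

static struct moxtet_driver example_driver = {
	.driver = {
		.name	= "moxtet-example",
		.probe	= example_probe,
	},
	.id_table = example_ids,
};
module_moxtet_driver(example_driver);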
*/ @@ -77,13 +73,6 @@ static inline int of_get_named_gpio_flags(struct device_node *np, return -ENOSYS; } -static inline int of_gpio_simple_xlate(struct gpio_chip *gc, - const struct of_phandle_args *gpiospec, - u32 *flags) -{ - return -ENOSYS; -} - #endif /* CONFIG_OF_GPIO */ /** diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h index 153bf25b4df3..2c32ca09df02 100644 --- a/include/linux/omap-iommu.h +++ b/include/linux/omap-iommu.h @@ -10,12 +10,27 @@ #ifndef _OMAP_IOMMU_H_ #define _OMAP_IOMMU_H_ +struct iommu_domain; + #ifdef CONFIG_OMAP_IOMMU extern void omap_iommu_save_ctx(struct device *dev); extern void omap_iommu_restore_ctx(struct device *dev); + +int omap_iommu_domain_deactivate(struct iommu_domain *domain); +int omap_iommu_domain_activate(struct iommu_domain *domain); #else static inline void omap_iommu_save_ctx(struct device *dev) {} static inline void omap_iommu_restore_ctx(struct device *dev) {} + +static inline int omap_iommu_domain_deactivate(struct iommu_domain *domain) +{ + return -ENODEV; +} + +static inline int omap_iommu_domain_activate(struct iommu_domain *domain) +{ + return -ENODEV; +} #endif #endif diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c842735a4f45..24486f405f12 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -548,6 +548,7 @@ #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 +#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -1070,7 +1071,6 @@ #define PCI_VENDOR_ID_SGI 0x10a9 #define PCI_DEVICE_ID_SGI_IOC3 0x0003 #define PCI_DEVICE_ID_SGI_LITHIUM 0x1002 -#define PCI_DEVICE_ID_SGI_IOC4 0x100a #define PCI_VENDOR_ID_WINBOND 0x10ad #define PCI_DEVICE_ID_WINBOND_82C105 0x0105 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e8ad3c590a23..61448c19a132 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -246,6 +246,7 @@ struct perf_event; #define PERF_PMU_CAP_ITRACE 0x20 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 #define PERF_PMU_CAP_NO_EXCLUDE 0x80 +#define PERF_PMU_CAP_AUX_OUTPUT 0x100 /** * struct pmu - generic performance monitoring unit @@ -447,6 +448,16 @@ struct pmu { /* optional */ /* + * Check if event can be used for aux_output purposes for + * events of this PMU. + * + * Runs from perf_event_open(). Should return 0 for "no match" + * or non-zero for "match". + */ + int (*aux_output_match) (struct perf_event *event); + /* optional */ + + /* * Filter events for PMU-specific reasons. 
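/*
 * A minimal sketch of the callback documented above, assuming a PMU whose
 * events may only feed a hypothetical example_trace_pmu; how the core pairs
 * the events and checks PERF_PMU_CAP_AUX_OUTPUT is not shown here.
 */
static struct pmu example_trace_pmu;

static int example_aux_output_match(struct perf_event *event)
{
	/* Non-zero: the candidate event is an acceptable aux_output target. */
	return event->pmu == &example_trace_pmu;
}

static struct pmu example_pmu = {
	.aux_output_match = example_aux_output_match,
};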
*/ int (*filter_match) (struct perf_event *event); /* optional */ @@ -681,6 +692,9 @@ struct perf_event { struct perf_addr_filter_range *addr_filter_ranges; unsigned long addr_filters_gen; + /* for aux_output events */ + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); struct rcu_head rcu_head; diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 1e5d86ebdaeb..52bc8e487ef7 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -11,6 +11,7 @@ struct fixed_phy_status { }; struct device_node; +struct gpio_desc; #if IS_ENABLED(CONFIG_FIXED_PHY) extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); diff --git a/include/linux/pid.h b/include/linux/pid.h index 2a83e434db9d..9645b1194c98 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -72,6 +72,10 @@ extern struct pid init_struct_pid; extern const struct file_operations pidfd_fops; +struct file; + +extern struct pid *pidfd_pid(const struct file *file); + static inline struct pid *get_pid(struct pid *pid) { if (pid) diff --git a/include/linux/platform_data/dma-iop32x.h b/include/linux/platform_data/dma-iop32x.h new file mode 100644 index 000000000000..ac83cff89549 --- /dev/null +++ b/include/linux/platform_data/dma-iop32x.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2006, Intel Corporation. + */ +#ifndef IOP_ADMA_H +#define IOP_ADMA_H +#include <linux/types.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> + +#define IOP_ADMA_SLOT_SIZE 32 +#define IOP_ADMA_THRESHOLD 4 +#ifdef DEBUG +#define IOP_PARANOIA 1 +#else +#define IOP_PARANOIA 0 +#endif +#define iop_paranoia(x) BUG_ON(IOP_PARANOIA && (x)) + +#define DMA0_ID 0 +#define DMA1_ID 1 +#define AAU_ID 2 + +/** + * struct iop_adma_device - internal representation of an ADMA device + * @pdev: Platform device + * @id: HW ADMA Device selector + * @dma_desc_pool: base of DMA descriptor region (DMA address) + * @dma_desc_pool_virt: base of DMA descriptor region (CPU address) + * @common: embedded struct dma_device + */ +struct iop_adma_device { + struct platform_device *pdev; + int id; + dma_addr_t dma_desc_pool; + void *dma_desc_pool_virt; + struct dma_device common; +}; + +/** + * struct iop_adma_chan - internal representation of an ADMA device + * @pending: allows batching of hardware operations + * @lock: serializes enqueue/dequeue operations to the slot pool + * @mmr_base: memory mapped register base + * @chain: device chain view of the descriptors + * @device: parent device + * @common: common dmaengine channel object members + * @last_used: place holder for allocation to continue from where it left off + * @all_slots: complete domain of slots usable by the channel + * @slots_allocated: records the actual size of the descriptor slot pool + * @irq_tasklet: bottom half where iop_adma_slot_cleanup runs + */ +struct iop_adma_chan { + int pending; + spinlock_t lock; /* protects the descriptor slot pool */ + void __iomem *mmr_base; + struct list_head chain; + struct iop_adma_device *device; + struct dma_chan common; + struct iop_adma_desc_slot *last_used; + struct list_head all_slots; + int slots_allocated; + struct tasklet_struct irq_tasklet; +}; + +/** + * struct iop_adma_desc_slot - IOP-ADMA software descriptor + * @slot_node: node on the iop_adma_chan.all_slots list + * @chain_node: node on the op_adma_chan.chain list + * @hw_desc: virtual address of the hardware descriptor chain + * @phys: hardware address of the hardware descriptor chain + * 
@group_head: first operation in a transaction + * @slot_cnt: total slots used in an transaction (group of operations) + * @slots_per_op: number of slots per operation + * @idx: pool index + * @tx_list: list of descriptors that are associated with one operation + * @async_tx: support for the async_tx api + * @group_list: list of slots that make up a multi-descriptor transaction + * for example transfer lengths larger than the supported hw max + * @xor_check_result: result of zero sum + * @crc32_result: result crc calculation + */ +struct iop_adma_desc_slot { + struct list_head slot_node; + struct list_head chain_node; + void *hw_desc; + struct iop_adma_desc_slot *group_head; + u16 slot_cnt; + u16 slots_per_op; + u16 idx; + struct list_head tx_list; + struct dma_async_tx_descriptor async_tx; + union { + u32 *xor_check_result; + u32 *crc32_result; + u32 *pq_check_result; + }; +}; + +struct iop_adma_platform_data { + int hw_id; + dma_cap_mask_t cap_mask; + size_t pool_size; +}; + +#define to_iop_sw_desc(addr_hw_desc) \ + container_of(addr_hw_desc, struct iop_adma_desc_slot, hw_desc) +#define iop_hw_desc_slot_idx(hw_desc, idx) \ + ( (void *) (((unsigned long) hw_desc) + ((idx) << 5)) ) +#endif diff --git a/include/linux/platform_data/gpio-htc-egpio.h b/include/linux/platform_data/gpio-htc-egpio.h index 9a3e78082883..eaefba0b6465 100644 --- a/include/linux/platform_data/gpio-htc-egpio.h +++ b/include/linux/platform_data/gpio-htc-egpio.h @@ -50,7 +50,4 @@ struct htc_egpio_platform_data { int num_chips; }; -/* Determine the wakeup irq, to be called during early resume */ -extern int htc_egpio_get_wakeup_irq(struct device *dev); - #endif diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index 44d913a7580c..8474a0208b34 100644 --- a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h @@ -13,4 +13,8 @@ struct iommu_platform_data { const char *reset_name; int (*assert_reset)(struct platform_device *pdev, const char *name); int (*deassert_reset)(struct platform_device *pdev, const char *name); + int (*device_enable)(struct platform_device *pdev); + int (*device_idle)(struct platform_device *pdev); + int (*set_pwrdm_constraint)(struct platform_device *pdev, bool request, + u8 *pwrst); }; diff --git a/include/linux/platform_data/spi-nuc900.h b/include/linux/platform_data/spi-nuc900.h deleted file mode 100644 index ca3510877000..000000000000 --- a/include/linux/platform_data/spi-nuc900.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2009 Nuvoton technology corporation. - * - * Wan ZongShun <mcuos.com@gmail.com> - */ - -#ifndef __SPI_NUC900_H -#define __SPI_NUC900_H - -extern void mfp_set_groupg(struct device *dev, const char *subname); - -struct nuc900_spi_info { - unsigned int num_cs; - unsigned int lsb; - unsigned int txneg; - unsigned int rxneg; - unsigned int divider; - unsigned int sleep; - unsigned int txnum; - unsigned int txbitlen; - int bus_num; -}; - -struct nuc900_spi_chip { - unsigned char bits_per_word; -}; - -#endif /* __SPI_NUC900_H */ diff --git a/include/linux/platform_data/video-nuc900fb.h b/include/linux/platform_data/video-nuc900fb.h deleted file mode 100644 index 3da504460c91..000000000000 --- a/include/linux/platform_data/video-nuc900fb.h +++ /dev/null @@ -1,79 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* linux/include/asm/arch-nuc900/fb.h - * - * Copyright (c) 2008 Nuvoton technology corporation - * All rights reserved. 
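/*
 * A minimal sketch of platform code filling in the new OMAP IOMMU hooks
 * above; the omap_device_*() helpers live in arch/arm/mach-omap2 and are
 * assumed to be available there, the variable name is illustrative.
 */
static struct iommu_platform_data example_iommu_pdata = {
	.reset_name	= "mmu",
	.assert_reset	= omap_device_assert_hardreset,
	.deassert_reset	= omap_device_deassert_hardreset,
	.device_enable	= omap_device_enable,	/* runtime-enable the MMU module */
	.device_idle	= omap_device_idle,	/* idle it again when unused */
};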
- * - * Changelog: - * - * 2008/08/26 vincen.zswan modify this file for LCD. - */ - -#ifndef __ASM_ARM_FB_H -#define __ASM_ARM_FB_H - - - -/* LCD Controller Hardware Desc */ -struct nuc900fb_hw { - unsigned int lcd_dccs; - unsigned int lcd_device_ctrl; - unsigned int lcd_mpulcd_cmd; - unsigned int lcd_int_cs; - unsigned int lcd_crtc_size; - unsigned int lcd_crtc_dend; - unsigned int lcd_crtc_hr; - unsigned int lcd_crtc_hsync; - unsigned int lcd_crtc_vr; - unsigned int lcd_va_baddr0; - unsigned int lcd_va_baddr1; - unsigned int lcd_va_fbctrl; - unsigned int lcd_va_scale; - unsigned int lcd_va_test; - unsigned int lcd_va_win; - unsigned int lcd_va_stuff; -}; - -/* LCD Display Description */ -struct nuc900fb_display { - /* LCD Image type */ - unsigned type; - - /* LCD Screen Size */ - unsigned short width; - unsigned short height; - - /* LCD Screen Info */ - unsigned short xres; - unsigned short yres; - unsigned short bpp; - - unsigned long pixclock; - unsigned short left_margin; - unsigned short right_margin; - unsigned short hsync_len; - unsigned short upper_margin; - unsigned short lower_margin; - unsigned short vsync_len; - - /* hardware special register value */ - unsigned int dccs; - unsigned int devctl; - unsigned int fbctrl; - unsigned int scale; -}; - -struct nuc900fb_mach_info { - struct nuc900fb_display *displays; - unsigned num_displays; - unsigned default_display; - /* GPIO Setting Info */ - unsigned gpio_dir; - unsigned gpio_dir_mask; - unsigned gpio_data; - unsigned gpio_data_mask; -}; - -extern void __init nuc900_fb_set_platdata(struct nuc900fb_mach_info *); - -#endif /* __ASM_ARM_FB_H */ diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 4802cd2c7309..60249e22e844 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -12,7 +12,7 @@ #define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */ #define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */ #define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */ -#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* FaN? 
*/ +#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* Atk Generic FuNction */ #define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */ #define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */ #define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */ @@ -72,7 +72,8 @@ /* Fan, Thermal */ #define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011 -#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 +#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */ +#define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013 /* Power */ #define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012 @@ -80,6 +81,9 @@ /* Deep S3 / Resume on LID open */ #define ASUS_WMI_DEVID_LID_RESUME 0x00120031 +/* Maximum charging percentage */ +#define ASUS_WMI_DEVID_RSOC 0x00120057 + /* DSTS masks */ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index b20798fc5191..3d10c84a97a9 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -4,18 +4,11 @@ #include <linux/spinlock.h> #include <linux/list.h> -#include <linux/sched.h> -#include <linux/timex.h> #include <linux/alarmtimer.h> +#include <linux/timerqueue.h> -struct siginfo; - -struct cpu_timer_list { - struct list_head entry; - u64 expires; - struct task_struct *task; - int firing; -}; +struct kernel_siginfo; +struct task_struct; /* * Bit fields within a clockid: @@ -63,6 +56,115 @@ static inline int clockid_to_fd(const clockid_t clk) return ~(clk >> 3); } +#ifdef CONFIG_POSIX_TIMERS + +/** + * cpu_timer - Posix CPU timer representation for k_itimer + * @node: timerqueue node to queue in the task/sig + * @head: timerqueue head on which this timer is queued + * @task: Pointer to target task + * @elist: List head for the expiry list + * @firing: Timer is currently firing + */ +struct cpu_timer { + struct timerqueue_node node; + struct timerqueue_head *head; + struct task_struct *task; + struct list_head elist; + int firing; +}; + +static inline bool cpu_timer_enqueue(struct timerqueue_head *head, + struct cpu_timer *ctmr) +{ + ctmr->head = head; + return timerqueue_add(head, &ctmr->node); +} + +static inline void cpu_timer_dequeue(struct cpu_timer *ctmr) +{ + if (ctmr->head) { + timerqueue_del(ctmr->head, &ctmr->node); + ctmr->head = NULL; + } +} + +static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr) +{ + return ctmr->node.expires; +} + +static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp) +{ + ctmr->node.expires = exp; +} + +/** + * posix_cputimer_base - Container per posix CPU clock + * @nextevt: Earliest-expiration cache + * @tqhead: timerqueue head for cpu_timers + */ +struct posix_cputimer_base { + u64 nextevt; + struct timerqueue_head tqhead; +}; + +/** + * posix_cputimers - Container for posix CPU timer related data + * @bases: Base container for posix CPU clocks + * @timers_active: Timers are queued. + * @expiry_active: Timer expiry is active. 
Used for + * process wide timers to avoid multiple + * task trying to handle expiry concurrently + * + * Used in task_struct and signal_struct + */ +struct posix_cputimers { + struct posix_cputimer_base bases[CPUCLOCK_MAX]; + unsigned int timers_active; + unsigned int expiry_active; +}; + +static inline void posix_cputimers_init(struct posix_cputimers *pct) +{ + memset(pct, 0, sizeof(*pct)); + pct->bases[0].nextevt = U64_MAX; + pct->bases[1].nextevt = U64_MAX; + pct->bases[2].nextevt = U64_MAX; +} + +void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit); + +static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct, + u64 runtime) +{ + pct->bases[CPUCLOCK_SCHED].nextevt = runtime; +} + +/* Init task static initializer */ +#define INIT_CPU_TIMERBASE(b) { \ + .nextevt = U64_MAX, \ +} + +#define INIT_CPU_TIMERBASES(b) { \ + INIT_CPU_TIMERBASE(b[0]), \ + INIT_CPU_TIMERBASE(b[1]), \ + INIT_CPU_TIMERBASE(b[2]), \ +} + +#define INIT_CPU_TIMERS(s) \ + .posix_cputimers = { \ + .bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \ + }, +#else +struct posix_cputimers { }; +struct cpu_timer { }; +#define INIT_CPU_TIMERS(s) +static inline void posix_cputimers_init(struct posix_cputimers *pct) { } +static inline void posix_cputimers_group_init(struct posix_cputimers *pct, + u64 cpu_limit) { } +#endif + #define REQUEUE_PENDING 1 /** @@ -85,7 +187,8 @@ static inline int clockid_to_fd(const clockid_t clk) * @it_process: The task to wakeup on clock_nanosleep (CPU timers) * @sigq: Pointer to preallocated sigqueue * @it: Union representing the various posix timer type - * internals. Also used for rcu freeing the timer. + * internals. + * @rcu: RCU head for freeing the timer. */ struct k_itimer { struct list_head list; @@ -110,15 +213,15 @@ struct k_itimer { struct { struct hrtimer timer; } real; - struct cpu_timer_list cpu; + struct cpu_timer cpu; struct { struct alarm alarmtimer; } alarm; - struct rcu_head rcu; } it; + struct rcu_head rcu; }; -void run_posix_cpu_timers(struct task_struct *task); +void run_posix_cpu_timers(void); void posix_cpu_timers_exit(struct task_struct *task); void posix_cpu_timers_exit_group(struct task_struct *task); void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, diff --git a/include/linux/preempt.h b/include/linux/preempt.h index dd92b1a93919..bbb68dba37cc 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -182,7 +182,7 @@ do { \ #define preemptible() (preempt_count() == 0 && !irqs_disabled()) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION #define preempt_enable() \ do { \ barrier(); \ @@ -203,7 +203,7 @@ do { \ __preempt_schedule(); \ } while (0) -#else /* !CONFIG_PREEMPT */ +#else /* !CONFIG_PREEMPTION */ #define preempt_enable() \ do { \ barrier(); \ @@ -217,7 +217,7 @@ do { \ } while (0) #define preempt_check_resched() do { } while (0) -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ #define preempt_disable_notrace() \ do { \ diff --git a/include/linux/psci.h b/include/linux/psci.h index a8a15613c157..e2bacc6fd2f2 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -15,8 +15,8 @@ bool psci_tos_resident_on(int cpu); -int psci_cpu_init_idle(unsigned int cpu); -int psci_cpu_suspend_enter(unsigned long index); +int psci_cpu_suspend_enter(u32 state); +bool psci_power_state_is_valid(u32 state); enum psci_conduit { PSCI_CONDUIT_NONE, diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 3f12cc77fb58..2d5eff506e13 100644 --- a/include/linux/qcom_scm.h +++ 
b/include/linux/qcom_scm.h @@ -49,8 +49,9 @@ extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, extern int qcom_scm_pas_auth_and_reset(u32 peripheral); extern int qcom_scm_pas_shutdown(u32 peripheral); extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, - unsigned int *src, struct qcom_scm_vmperm *newvm, - int dest_cnt); + unsigned int *src, + const struct qcom_scm_vmperm *newvm, + unsigned int dest_cnt); extern void qcom_scm_cpu_power_down(u32 flags); extern u32 qcom_scm_get_version(void); extern int qcom_scm_set_remote_state(u32 state, u32 id); @@ -87,8 +88,8 @@ qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; } static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; } static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, unsigned int *src, - struct qcom_scm_vmperm *newvm, - int dest_cnt) { return -ENODEV; } + const struct qcom_scm_vmperm *newvm, + unsigned int dest_cnt) { return -ENODEV; } static inline void qcom_scm_cpu_power_down(u32 flags) {} static inline u32 qcom_scm_get_version(void) { return 0; } static inline u32 diff --git a/include/linux/random.h b/include/linux/random.h index 1f7dced2bba6..f189c927fdea 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -19,6 +19,7 @@ struct random_ready_callback { }; extern void add_device_randomness(const void *, unsigned int); +extern void add_bootloader_randomness(const void *, unsigned int); #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) static inline void add_latent_entropy(void) diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 87404cb015f1..646759042333 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -14,6 +14,9 @@ #ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H #define __INCLUDE_LINUX_RCU_SEGCBLIST_H +#include <linux/types.h> +#include <linux/atomic.h> + /* Simple unsegmented callback lists. */ struct rcu_cblist { struct rcu_head *head; @@ -65,8 +68,14 @@ struct rcu_segcblist { struct rcu_head *head; struct rcu_head **tails[RCU_CBLIST_NSEGS]; unsigned long gp_seq[RCU_CBLIST_NSEGS]; +#ifdef CONFIG_RCU_NOCB_CPU + atomic_long_t len; +#else long len; +#endif long len_lazy; + u8 enabled; + u8 offloaded; }; #define RCU_SEGCBLIST_INITIALIZER(n) \ diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h index 9b83865d24f9..0027d4c8087c 100644 --- a/include/linux/rcu_sync.h +++ b/include/linux/rcu_sync.h @@ -31,9 +31,7 @@ struct rcu_sync { */ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp) { - RCU_LOCKDEP_WARN(!rcu_read_lock_held() && - !rcu_read_lock_bh_held() && - !rcu_read_lock_sched_held(), + RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(), "suspicious rcu_sync_is_idle() usage"); return !READ_ONCE(rsp->gp_state); /* GP_IDLE */ } diff --git a/include/linux/rculist.h b/include/linux/rculist.h index e91ec9ddcd30..4158b7212936 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -41,6 +41,24 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) /* + * Check during list traversal that we are within an RCU reader + */ + +#define check_arg_count_one(dummy) + +#ifdef CONFIG_PROVE_RCU_LIST +#define __list_check_rcu(dummy, cond, extra...) \ + ({ \ + check_arg_count_one(extra); \ + RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(), \ + "RCU-list traversed in non-reader section!"); \ + }) +#else +#define __list_check_rcu(dummy, cond, extra...) 
\ + ({ check_arg_count_one(extra); }) +#endif + +/* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know @@ -343,14 +361,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. + * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ -#define list_for_each_entry_rcu(pos, head, member) \ - for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ - &pos->member != (head); \ +#define list_for_each_entry_rcu(pos, head, member, cond...) \ + for (__list_check_rcu(dummy, ## cond, 0), \ + pos = list_entry_rcu((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** @@ -616,13 +636,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. + * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ -#define hlist_for_each_entry_rcu(pos, head, member) \ - for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\ +#define hlist_for_each_entry_rcu(pos, head, member, cond...) \ + for (__list_check_rcu(dummy, ## cond, 0), \ + pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ @@ -642,10 +664,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * not do any RCU debugging or tracing. */ #define hlist_for_each_entry_rcu_notrace(pos, head, member) \ - for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\ + for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ - pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\ + pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 8f7167478c1d..75a2eded7aa2 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -221,6 +221,7 @@ int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); int rcu_read_lock_bh_held(void); int rcu_read_lock_sched_held(void); +int rcu_read_lock_any_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -241,6 +242,12 @@ static inline int rcu_read_lock_sched_held(void) { return !preemptible(); } + +static inline int rcu_read_lock_any_held(void) +{ + return !preemptible(); +} + #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #ifdef CONFIG_PROVE_RCU @@ -476,7 +483,7 @@ do { \ * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). 
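/*
 * A minimal sketch of the new optional argument, assuming a list that is
 * written under example_mutex and read either under rcu_read_lock() or
 * under the mutex; the lockdep expression keeps CONFIG_PROVE_RCU_LIST quiet
 * for the locked traversal. All names are illustrative.
 */
static DEFINE_MUTEX(example_mutex);
static LIST_HEAD(example_list);

struct example_entry {
	struct list_head node;
	int value;
};

static int example_sum_locked(void)
{
	struct example_entry *e;
	int sum = 0;

	lockdep_assert_held(&example_mutex);

	/* Not an RCU read-side section, but provably safe via the mutex: */
	list_for_each_entry_rcu(e, &example_list, node,
				lockdep_is_held(&example_mutex))
		sum += e->value;

	return sum;
}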
*/ -#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) +#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu) /** * rcu_dereference_protected() - fetch RCU pointer when updates prevented @@ -578,7 +585,7 @@ do { \ * * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), * it is illegal to block while in an RCU read-side critical section. - * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT + * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION * kernel builds, RCU read-side critical sections may be preempted, * but explicit blocking is illegal. Finally, in preemptible RCU * implementations in real-time (with -rt patchset) kernel builds, RCU diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 8e727f57d814..9bf1dfe7781f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -12,7 +12,7 @@ #ifndef __LINUX_TINY_H #define __LINUX_TINY_H -#include <linux/ktime.h> +#include <asm/param.h> /* for HZ */ /* Never flag non-existent other CPUs! */ static inline bool rcu_eqs_special_set(int cpu) { return false; } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 735601ac27d3..18b1ed9864b0 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -53,7 +53,7 @@ void rcu_scheduler_starting(void); extern int rcu_scheduler_active __read_mostly; void rcu_end_inkernel_boot(void); bool rcu_is_watching(void); -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION void rcu_all_qs(void); #endif diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 815983419375..337a46391527 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -281,6 +281,12 @@ void devm_regulator_unregister_notifier(struct regulator *regulator, void *regulator_get_drvdata(struct regulator *regulator); void regulator_set_drvdata(struct regulator *regulator, void *data); +/* misc helpers */ + +void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, + const char *const *supply_names, + unsigned int num_supplies); + #else /* @@ -580,6 +586,13 @@ static inline int regulator_list_voltage(struct regulator *regulator, unsigned s return -EINVAL; } +static inline void +regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, + const char *const *supply_names, + unsigned int num_supplies) +{ +} + #endif static inline int regulator_set_voltage_triplet(struct regulator *regulator, diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h new file mode 100644 index 000000000000..1cc304946d09 --- /dev/null +++ b/include/linux/regulator/mt6358-regulator.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. 
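/*
 * A minimal sketch of the new regulator bulk helper, assuming a consumer
 * driver with two supplies; the names "vdd"/"vddio" and the function are
 * illustrative.
 */
static const char * const example_supplies[] = { "vdd", "vddio" };

static int example_get_supplies(struct device *dev,
				struct regulator_bulk_data *bulk)
{
	/* Fills bulk[i].supply from example_supplies[i]. */
	regulator_bulk_set_supply_names(bulk, example_supplies,
					ARRAY_SIZE(example_supplies));

	return devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supplies), bulk);
}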
+ */ + +#ifndef __LINUX_REGULATOR_MT6358_H +#define __LINUX_REGULATOR_MT6358_H + +enum { + MT6358_ID_VDRAM1 = 0, + MT6358_ID_VCORE, + MT6358_ID_VPA, + MT6358_ID_VPROC11, + MT6358_ID_VPROC12, + MT6358_ID_VGPU, + MT6358_ID_VS2, + MT6358_ID_VMODEM, + MT6358_ID_VS1, + MT6358_ID_VDRAM2 = 9, + MT6358_ID_VSIM1, + MT6358_ID_VIBR, + MT6358_ID_VRF12, + MT6358_ID_VIO18, + MT6358_ID_VUSB, + MT6358_ID_VCAMIO, + MT6358_ID_VCAMD, + MT6358_ID_VCN18, + MT6358_ID_VFE28, + MT6358_ID_VSRAM_PROC11, + MT6358_ID_VCN28, + MT6358_ID_VSRAM_OTHERS, + MT6358_ID_VSRAM_GPU, + MT6358_ID_VXO22, + MT6358_ID_VEFUSE, + MT6358_ID_VAUX18, + MT6358_ID_VMCH, + MT6358_ID_VBIF28, + MT6358_ID_VSRAM_PROC12, + MT6358_ID_VCAMA1, + MT6358_ID_VEMC, + MT6358_ID_VIO28, + MT6358_ID_VA12, + MT6358_ID_VRF18, + MT6358_ID_VCN33_BT, + MT6358_ID_VCN33_WIFI, + MT6358_ID_VCAMA2, + MT6358_ID_VMC, + MT6358_ID_VLDO28, + MT6358_ID_VAUD28, + MT6358_ID_VSIM2, + MT6358_ID_RG_MAX, +}; + +#define MT6358_MAX_REGULATOR MT6358_ID_RG_MAX + +#endif /* __LINUX_REGULATOR_MT6358_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 9d9c663987d8..00d6054687dd 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -45,6 +45,9 @@ struct rw_semaphore { #endif raw_spinlock_t wait_lock; struct list_head wait_list; +#ifdef CONFIG_DEBUG_RWSEMS + void *magic; +#endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif @@ -73,6 +76,12 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) # define __RWSEM_DEP_MAP_INIT(lockname) #endif +#ifdef CONFIG_DEBUG_RWSEMS +# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname +#else +# define __DEBUG_RWSEM_INITIALIZER(lockname) +#endif + #ifdef CONFIG_RWSEM_SPIN_ON_OWNER #define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED #else @@ -85,6 +94,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) .wait_list = LIST_HEAD_INIT((name).wait_list), \ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ __RWSEM_OPT_INIT(name) \ + __DEBUG_RWSEM_INITIALIZER(name) \ __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 9f51932bd543..b75b28287005 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -25,9 +25,11 @@ #include <linux/resource.h> #include <linux/latencytop.h> #include <linux/sched/prio.h> +#include <linux/sched/types.h> #include <linux/signal_types.h> #include <linux/mm_types_task.h> #include <linux/task_io_accounting.h> +#include <linux/posix-timers.h> #include <linux/rseq.h> /* task_struct member predeclarations (sorted alphabetically): */ @@ -244,27 +246,6 @@ struct prev_cputime { #endif }; -/** - * struct task_cputime - collected CPU time counts - * @utime: time spent in user mode, in nanoseconds - * @stime: time spent in kernel mode, in nanoseconds - * @sum_exec_runtime: total time spent on the CPU, in nanoseconds - * - * This structure groups together three kinds of CPU time that are tracked for - * threads and thread groups. Most things considering CPU time want to group - * these counts together and treat all three of them in parallel. 
- */ -struct task_cputime { - u64 utime; - u64 stime; - unsigned long long sum_exec_runtime; -}; - -/* Alternate field names when used on cache expirations: */ -#define virt_exp utime -#define prof_exp stime -#define sched_exp sum_exec_runtime - enum vtime_state { /* Task is sleeping or running in a CPU with VTIME inactive: */ VTIME_INACTIVE = 0, @@ -295,6 +276,11 @@ enum uclamp_id { UCLAMP_CNT }; +#ifdef CONFIG_SMP +extern struct root_domain def_root_domain; +extern struct mutex sched_domains_mutex; +#endif + struct sched_info { #ifdef CONFIG_SCHED_INFO /* Cumulative counters: */ @@ -876,10 +862,8 @@ struct task_struct { unsigned long min_flt; unsigned long maj_flt; -#ifdef CONFIG_POSIX_TIMERS - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; -#endif + /* Empty if CONFIG_POSIX_CPUTIMERS=n */ + struct posix_cputimers posix_cputimers; /* Process credentials: */ @@ -1767,7 +1751,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) * value indicates whether a reschedule was done in fact. * cond_resched_lock() will drop the spinlock before scheduling, */ -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION extern int _cond_resched(void); #else static inline int _cond_resched(void) { return 0; } @@ -1796,12 +1780,12 @@ static inline void cond_resched_rcu(void) /* * Does a critical section need to be broken due to another - * task waiting?: (technically does not depend on CONFIG_PREEMPT, + * task waiting?: (technically does not depend on CONFIG_PREEMPTION, * but a general need for low latency) */ static inline int spin_needbreak(spinlock_t *lock) { -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION return spin_is_contended(lock); #else return 0; diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h index 53f883f5a2fd..6c9f19a33865 100644 --- a/include/linux/sched/cputime.h +++ b/include/linux/sched/cputime.h @@ -61,8 +61,7 @@ extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, * Thread group CPU time accounting. */ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); -void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); - +void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples); /* * The following are functions that support scheduler-internal time accounting. @@ -71,7 +70,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); */ /** - * get_running_cputimer - return &tsk->signal->cputimer if cputimer is running + * get_running_cputimer - return &tsk->signal->cputimer if cputimers are active * * @tsk: Pointer to target task. */ @@ -81,8 +80,11 @@ struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk) { struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; - /* Check if cputimer isn't running. This is accessed without locking. */ - if (!READ_ONCE(cputimer->running)) + /* + * Check whether posix CPU timers are active. If not the thread + * group accounting is not active either. Lockless check. 
+ */ + if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active)) return NULL; /* diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 0cb034331cbb..1aff00b65f3c 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -24,3 +24,11 @@ static inline bool dl_time_before(u64 a, u64 b) { return (s64)(a - b) < 0; } + +#ifdef CONFIG_SMP + +struct root_domain; +extern void dl_add_task_root_domain(struct task_struct *p); +extern void dl_clear_root_domain(struct root_domain *rd); + +#endif /* CONFIG_SMP */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index efd8ce7675ed..88050259c466 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -9,6 +9,7 @@ #include <linux/sched/task.h> #include <linux/cred.h> #include <linux/refcount.h> +#include <linux/posix-timers.h> /* * Types defining task->signal and task->sighand and APIs using them: @@ -56,18 +57,12 @@ struct task_cputime_atomic { /** * struct thread_group_cputimer - thread group interval timer counts * @cputime_atomic: atomic thread group interval timers. - * @running: true when there are timers running and - * @cputime_atomic receives updates. - * @checking_timer: true when a thread in the group is in the - * process of checking for thread group timers. * * This structure contains the version of task_cputime, above, that is * used for thread group CPU timer calculations. */ struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic; - bool running; - bool checking_timer; }; struct multiprocess_signals { @@ -148,12 +143,9 @@ struct signal_struct { */ struct thread_group_cputimer cputimer; - /* Earliest-expiration cache. */ - struct task_cputime cputime_expires; - - struct list_head cpu_timers[3]; - #endif + /* Empty if CONFIG_POSIX_TIMERS=n */ + struct posix_cputimers posix_cputimers; /* PID/PID hash table linkage. 
*/ struct pid *pids[PIDTYPE_MAX]; diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 0497091e40c1..3d90ed8f75f0 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -105,7 +105,11 @@ extern void sched_exec(void); #define sched_exec() {} #endif -#define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0) +static inline struct task_struct *get_task_struct(struct task_struct *t) +{ + refcount_inc(&t->usage); + return t; +} extern void __put_task_struct(struct task_struct *t); diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 7863bb62d2ab..f341163fedc9 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -150,6 +150,10 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd) return to_cpumask(sd->span); } +extern void partition_sched_domains_locked(int ndoms_new, + cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new); + extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new); @@ -195,6 +199,12 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl); struct sched_domain_attr; static inline void +partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], + struct sched_domain_attr *dattr_new) +{ +} + +static inline void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], struct sched_domain_attr *dattr_new) { diff --git a/include/linux/sched/types.h b/include/linux/sched/types.h new file mode 100644 index 000000000000..3c3e049224ae --- /dev/null +++ b/include/linux/sched/types.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_TYPES_H +#define _LINUX_SCHED_TYPES_H + +#include <linux/types.h> + +/** + * struct task_cputime - collected CPU time counts + * @stime: time spent in kernel mode, in nanoseconds + * @utime: time spent in user mode, in nanoseconds + * @sum_exec_runtime: total time spent on the CPU, in nanoseconds + * + * This structure groups together three kinds of CPU time that are tracked for + * threads and thread groups. Most things considering CPU time want to group + * these counts together and treat all three of them in parallel. 
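/*
 * A tiny sketch of the pattern the inline get_task_struct() above enables:
 * take the reference and store the pointer in one expression. example_ctx
 * is an illustrative structure.
 */
struct example_ctx {
	struct task_struct *owner;
};

static void example_set_owner(struct example_ctx *ctx)
{
	ctx->owner = get_task_struct(current);	/* ref + assignment in one step */
}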
+ */ +struct task_cputime { + u64 stime; + u64 utime; + unsigned long long sum_exec_runtime; +}; + +#endif diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 9ff2e9357e9a..881fea47c83d 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-only */ /* * SCMI Message Protocol driver header * @@ -71,7 +71,7 @@ struct scmi_clk_ops { int (*rate_get)(const struct scmi_handle *handle, u32 clk_id, u64 *rate); int (*rate_set)(const struct scmi_handle *handle, u32 clk_id, - u32 config, u64 rate); + u64 rate); int (*enable)(const struct scmi_handle *handle, u32 clk_id); int (*disable)(const struct scmi_handle *handle, u32 clk_id); }; @@ -145,6 +145,8 @@ struct scmi_sensor_info { u32 id; u8 type; s8 scale; + u8 num_trip_points; + bool async; char name[SCMI_MAX_STR_SIZE]; }; @@ -167,9 +169,9 @@ enum scmi_sensor_class { * * @count_get: get the count of sensors provided by SCMI * @info_get: get the information of the specified sensor - * @configuration_set: control notifications on cross-over events for + * @trip_point_notify: control notifications on cross-over events for * the trip-points - * @trip_point_set: selects and configures a trip-point of interest + * @trip_point_config: selects and configures a trip-point of interest * @reading_get: gets the current value of the sensor */ struct scmi_sensor_ops { @@ -177,12 +179,32 @@ struct scmi_sensor_ops { const struct scmi_sensor_info *(*info_get) (const struct scmi_handle *handle, u32 sensor_id); - int (*configuration_set)(const struct scmi_handle *handle, - u32 sensor_id); - int (*trip_point_set)(const struct scmi_handle *handle, u32 sensor_id, - u8 trip_id, u64 trip_value); + int (*trip_point_notify)(const struct scmi_handle *handle, + u32 sensor_id, bool enable); + int (*trip_point_config)(const struct scmi_handle *handle, + u32 sensor_id, u8 trip_id, u64 trip_value); int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, - bool async, u64 *value); + u64 *value); +}; + +/** + * struct scmi_reset_ops - represents the various operations provided + * by SCMI Reset Protocol + * + * @num_domains_get: get the count of reset domains provided by SCMI + * @name_get: gets the name of a reset domain + * @latency_get: gets the reset latency for the specified reset domain + * @reset: resets the specified reset domain + * @assert: explicitly assert reset signal of the specified reset domain + * @deassert: explicitly deassert reset signal of the specified reset domain + */ +struct scmi_reset_ops { + int (*num_domains_get)(const struct scmi_handle *handle); + char *(*name_get)(const struct scmi_handle *handle, u32 domain); + int (*latency_get)(const struct scmi_handle *handle, u32 domain); + int (*reset)(const struct scmi_handle *handle, u32 domain); + int (*assert)(const struct scmi_handle *handle, u32 domain); + int (*deassert)(const struct scmi_handle *handle, u32 domain); }; /** @@ -194,6 +216,7 @@ struct scmi_sensor_ops { * @perf_ops: pointer to set of performance protocol operations * @clk_ops: pointer to set of clock protocol operations * @sensor_ops: pointer to set of sensor protocol operations + * @reset_ops: pointer to set of reset protocol operations * @perf_priv: pointer to private data structure specific to performance * protocol(for internal use only) * @clk_priv: pointer to private data structure specific to clock @@ -202,6 +225,8 @@ struct scmi_sensor_ops { * protocol(for internal use only) * 
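/*
 * A minimal sketch of a consumer of the reset operations above, assuming a
 * handle whose reset_ops has been populated by the core; the domain number
 * and function name are illustrative.
 */
static int example_scmi_reset(const struct scmi_handle *handle)
{
	u32 domain = 0;

	if (!handle->reset_ops)
		return -EOPNOTSUPP;

	dev_info(handle->dev, "resetting %s (latency %d)\n",
		 handle->reset_ops->name_get(handle, domain),
		 handle->reset_ops->latency_get(handle, domain));

	/* Full reset cycle of the whole domain. */
	return handle->reset_ops->reset(handle, domain);
}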
@sensor_priv: pointer to private data structure specific to sensors * protocol(for internal use only) + * @reset_priv: pointer to private data structure specific to reset + * protocol(for internal use only) */ struct scmi_handle { struct device *dev; @@ -210,11 +235,13 @@ struct scmi_handle { struct scmi_clk_ops *clk_ops; struct scmi_power_ops *power_ops; struct scmi_sensor_ops *sensor_ops; + struct scmi_reset_ops *reset_ops; /* for protocol internal use */ void *perf_priv; void *clk_priv; void *power_priv; void *sensor_priv; + void *reset_priv; }; enum scmi_std_protocol { @@ -224,6 +251,7 @@ enum scmi_std_protocol { SCMI_PROTOCOL_PERF = 0x13, SCMI_PROTOCOL_CLOCK = 0x14, SCMI_PROTOCOL_SENSOR = 0x15, + SCMI_PROTOCOL_RESET = 0x16, }; struct scmi_device { diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 54ade13a9b15..f3ae45d02e80 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -63,26 +63,26 @@ void cmdq_pkt_destroy(struct cmdq_pkt *pkt); /** * cmdq_pkt_write() - append write command to the CMDQ packet * @pkt: the CMDQ packet - * @value: the specified target register value * @subsys: the CMDQ sub system code * @offset: register offset from CMDQ sub system + * @value: the specified target register value * * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_write(struct cmdq_pkt *pkt, u32 value, u32 subsys, u32 offset); +int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value); /** * cmdq_pkt_write_mask() - append write command with mask to the CMDQ packet * @pkt: the CMDQ packet - * @value: the specified target register value * @subsys: the CMDQ sub system code * @offset: register offset from CMDQ sub system + * @value: the specified target register value * @mask: the specified target register mask * * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value, - u32 subsys, u32 offset, u32 mask); +int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys, + u16 offset, u32 value, u32 mask); /** * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet @@ -91,7 +91,7 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u32 value, * * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event); +int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); /** * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet @@ -100,7 +100,7 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u32 event); * * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u32 event); +int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); /** * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ diff --git a/include/linux/soc/nxp/lpc32xx-misc.h b/include/linux/soc/nxp/lpc32xx-misc.h new file mode 100644 index 000000000000..699c6f1e3aab --- /dev/null +++ b/include/linux/soc/nxp/lpc32xx-misc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Author: Kevin Wells <kevin.wells@nxp.com> + * + * Copyright (C) 2010 NXP Semiconductors + */ + +#ifndef __SOC_LPC32XX_MISC_H +#define __SOC_LPC32XX_MISC_H + +#include <linux/types.h> +#include <linux/phy.h> + +#ifdef CONFIG_ARCH_LPC32XX +extern u32 lpc32xx_return_iram(void __iomem **mapbase, dma_addr_t *dmaaddr); +extern void lpc32xx_set_phy_interface_mode(phy_interface_t mode); +extern void lpc32xx_loopback_set(resource_size_t mapbase, int 
state); +#else +static inline u32 lpc32xx_return_iram(void __iomem **mapbase, dma_addr_t *dmaaddr) +{ + *mapbase = NULL; + *dmaaddr = 0; + return 0; +} +static inline void lpc32xx_set_phy_interface_mode(phy_interface_t mode) +{ +} +static inline void lpc32xx_loopback_set(resource_size_t mapbase, int state) +{ +} +#endif + +#endif /* __SOC_LPC32XX_MISC_H */ diff --git a/include/linux/soc/samsung/exynos-chipid.h b/include/linux/soc/samsung/exynos-chipid.h new file mode 100644 index 000000000000..8bca6763f99c --- /dev/null +++ b/include/linux/soc/samsung/exynos-chipid.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * + * Exynos - CHIPID support + */ +#ifndef __LINUX_SOC_EXYNOS_CHIPID_H +#define __LINUX_SOC_EXYNOS_CHIPID_H + +#define EXYNOS_CHIPID_REG_PRO_ID 0x00 +#define EXYNOS_SUBREV_MASK (0xf << 4) +#define EXYNOS_MAINREV_MASK (0xf << 0) +#define EXYNOS_REV_MASK (EXYNOS_SUBREV_MASK | \ + EXYNOS_MAINREV_MASK) +#define EXYNOS_MASK 0xfffff000 + +#define EXYNOS_CHIPID_REG_PKG_ID 0x04 +/* Bit field definitions for EXYNOS_CHIPID_REG_PKG_ID register */ +#define EXYNOS5422_IDS_OFFSET 24 +#define EXYNOS5422_IDS_MASK 0xff +#define EXYNOS5422_USESG_OFFSET 3 +#define EXYNOS5422_USESG_MASK 0x01 +#define EXYNOS5422_SG_OFFSET 0 +#define EXYNOS5422_SG_MASK 0x07 +#define EXYNOS5422_TABLE_OFFSET 8 +#define EXYNOS5422_TABLE_MASK 0x03 +#define EXYNOS5422_SG_A_OFFSET 17 +#define EXYNOS5422_SG_A_MASK 0x0f +#define EXYNOS5422_SG_B_OFFSET 21 +#define EXYNOS5422_SG_B_MASK 0x03 +#define EXYNOS5422_SG_BSIGN_OFFSET 23 +#define EXYNOS5422_SG_BSIGN_MASK 0x01 +#define EXYNOS5422_BIN2_OFFSET 12 +#define EXYNOS5422_BIN2_MASK 0x01 + +#define EXYNOS_CHIPID_REG_LOT_ID 0x14 + +#define EXYNOS_CHIPID_REG_AUX_INFO 0x1c +/* Bit field definitions for EXYNOS_CHIPID_REG_AUX_INFO register */ +#define EXYNOS5422_TMCB_OFFSET 0 +#define EXYNOS5422_TMCB_MASK 0x7f +#define EXYNOS5422_ARM_UP_OFFSET 8 +#define EXYNOS5422_ARM_UP_MASK 0x03 +#define EXYNOS5422_ARM_DN_OFFSET 10 +#define EXYNOS5422_ARM_DN_MASK 0x03 +#define EXYNOS5422_KFC_UP_OFFSET 12 +#define EXYNOS5422_KFC_UP_MASK 0x03 +#define EXYNOS5422_KFC_DN_OFFSET 14 +#define EXYNOS5422_KFC_DN_MASK 0x03 + +#endif /*__LINUX_SOC_EXYNOS_CHIPID_H */ diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 6c610e188a44..9531ec823298 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -97,7 +97,10 @@ struct ti_sci_core_ops { */ struct ti_sci_dev_ops { int (*get_device)(const struct ti_sci_handle *handle, u32 id); + int (*get_device_exclusive)(const struct ti_sci_handle *handle, u32 id); int (*idle_device)(const struct ti_sci_handle *handle, u32 id); + int (*idle_device_exclusive)(const struct ti_sci_handle *handle, + u32 id); int (*put_device)(const struct ti_sci_handle *handle, u32 id); int (*is_valid)(const struct ti_sci_handle *handle, u32 id); int (*get_context_loss_count)(const struct ti_sci_handle *handle, diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index ed7c4d6b8235..031ce8617df8 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -214,7 +214,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) /* * Define the various spin_lock methods. Note we define these - * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The + * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. 
The * various methods are defined as nops in the case they are not * required. */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 42dfab89e740..b762eaba4cdf 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) /* * If lockdep is enabled then we use the non-preemption spin-ops - * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are + * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are * not re-enabled during lock-acquire (which the preempt-spin-ops do): */ #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index f0cfd12cb45e..83bd8cb475d7 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -9,9 +9,9 @@ struct task_struct; struct pt_regs; #ifdef CONFIG_STACKTRACE -void stack_trace_print(unsigned long *trace, unsigned int nr_entries, +void stack_trace_print(const unsigned long *trace, unsigned int nr_entries, int spaces); -int stack_trace_snprint(char *buf, size_t size, unsigned long *entries, +int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries, unsigned int nr_entries, int spaces); unsigned int stack_trace_save(unsigned long *store, unsigned int size, unsigned int skipnr); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 361f62bb4a8e..cde3dc18e21a 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -46,13 +46,17 @@ enum dma_sync_target { extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr, - phys_addr_t phys, size_t size, + phys_addr_t phys, + size_t mapping_size, + size_t alloc_size, enum dma_data_direction dir, unsigned long attrs); extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, - size_t size, enum dma_data_direction dir, + size_t mapping_size, + size_t alloc_size, + enum dma_data_direction dir, unsigned long attrs); extern void swiotlb_tbl_sync_single(struct device *hwdev, diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h index b7c70c3e953f..48ceea867dd6 100644 --- a/include/linux/sys_soc.h +++ b/include/linux/sys_soc.h @@ -12,6 +12,7 @@ struct soc_device_attribute { const char *machine; const char *family; const char *revision; + const char *serial_number; const char *soc_id; const void *data; }; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 88145da7d140..f7c561c4dcdd 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1402,4 +1402,23 @@ static inline unsigned int ksys_personality(unsigned int personality) return old; } +/* for __ARCH_WANT_SYS_IPC */ +long ksys_semtimedop(int semid, struct sembuf __user *tsops, + unsigned int nsops, + const struct __kernel_timespec __user *timeout); +long ksys_semget(key_t key, int nsems, int semflg); +long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); +long ksys_msgget(key_t key, int msgflg); +long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); +long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, + long msgtyp, int msgflg); +long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, + int msgflg); +long ksys_shmget(key_t key, size_t size, int shmflg); +long ksys_shmdt(char __user *shmaddr); +long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); +long compat_ksys_semtimedop(int 
semid, struct sembuf __user *tsems, + unsigned int nsops, + const struct old_timespec32 __user *timeout); + #endif diff --git a/include/linux/timer.h b/include/linux/timer.h index 282e4f2a532a..1e6650ed066d 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -183,7 +183,7 @@ extern void add_timer(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) extern int del_timer_sync(struct timer_list *timer); #else # define del_timer_sync(t) del_timer(t) diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index 78b8cc73f12f..93884086f392 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h @@ -12,8 +12,7 @@ struct timerqueue_node { }; struct timerqueue_head { - struct rb_root head; - struct timerqueue_node *next; + struct rb_root_cached rb_root; }; @@ -29,13 +28,14 @@ extern struct timerqueue_node *timerqueue_iterate_next( * * @head: head of timerqueue * - * Returns a pointer to the timer node that has the - * earliest expiration time. + * Returns a pointer to the timer node that has the earliest expiration time. */ static inline struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) { - return head->next; + struct rb_node *leftmost = rb_first_cached(&head->rb_root); + + return rb_entry(leftmost, struct timerqueue_node, node); } static inline void timerqueue_init(struct timerqueue_node *node) @@ -43,9 +43,18 @@ static inline void timerqueue_init(struct timerqueue_node *node) RB_CLEAR_NODE(&node->node); } +static inline bool timerqueue_node_queued(struct timerqueue_node *node) +{ + return !RB_EMPTY_NODE(&node->node); +} + +static inline bool timerqueue_node_expires(struct timerqueue_node *node) +{ + return node->expires; +} + static inline void timerqueue_init_head(struct timerqueue_head *head) { - head->head = RB_ROOT; - head->next = NULL; + head->rb_root = RB_ROOT_CACHED; } #endif /* _LINUX_TIMERQUEUE_H */ diff --git a/include/linux/topology.h b/include/linux/topology.h index 47a3e3c08036..eb2fe6edd73c 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -27,6 +27,7 @@ #ifndef _LINUX_TOPOLOGY_H #define _LINUX_TOPOLOGY_H +#include <linux/arch_topology.h> #include <linux/cpumask.h> #include <linux/bitops.h> #include <linux/mmzone.h> @@ -59,6 +60,20 @@ int arch_update_cpu_topology(void); */ #define RECLAIM_DISTANCE 30 #endif + +/* + * The following tunable allows platforms to override the default node + * reclaim distance (RECLAIM_DISTANCE) if remote memory accesses are + * sufficiently fast that the default value actually hurts + * performance. + * + * AMD EPYC machines use this because even though the 2-hop distance + * is 32 (3.2x slower than a local memory access) performance actually + * *improves* if allowed to reclaim memory and load balance tasks + * between NUMA nodes 2-hops apart. 
+ */ +extern int __read_mostly node_reclaim_distance; + #ifndef PENALTY_FOR_NODE_WITH_CPUS #define PENALTY_FOR_NODE_WITH_CPUS (1) #endif diff --git a/include/linux/torture.h b/include/linux/torture.h index a620118385bb..6241f59e2d6f 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -86,7 +86,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp); #define torture_stop_kthread(n, tp) \ _torture_stop_kthread("Stopping " #n " task", &(tp)) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION #define torture_preempt_schedule() preempt_schedule() #else #define torture_preempt_schedule() diff --git a/include/linux/usb/samsung_usb_phy.h b/include/linux/usb/samsung_usb_phy.h deleted file mode 100644 index dc0071741695..000000000000 --- a/include/linux/usb/samsung_usb_phy.h +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2012 Samsung Electronics Co.Ltd - * http://www.samsung.com/ - * - * Defines phy types for samsung usb phy controllers - HOST or DEIVCE. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -enum samsung_usb_phy_type { - USB_PHY_TYPE_DEVICE, - USB_PHY_TYPE_HOST, -}; diff --git a/include/linux/wait.h b/include/linux/wait.h index 30c515520fb2..3eb7cae8206c 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -501,8 +501,8 @@ do { \ int __ret = 0; \ struct hrtimer_sleeper __t; \ \ - hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \ - hrtimer_init_sleeper(&__t, current); \ + hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ + HRTIMER_MODE_REL); \ if ((timeout) != KTIME_MAX) \ hrtimer_start_range_ns(&__t.timer, timeout, \ current->timer_slack_ns, \ |
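
For the scmi_protocol.h changes above, a minimal consumer sketch. It assumes an scmi_handle already obtained from the SCMI core (probe plumbing omitted); the sensor/clock/reset ids and the trip value are made-up illustrative numbers, and only callbacks and fields visible in the hunks are used.

#include <linux/errno.h>
#include <linux/scmi_protocol.h>

/* Sketch only: "handle" must come from the SCMI core; the ids and the
 * trip value below are placeholders for illustration.
 */
static int example_scmi_consumer(const struct scmi_handle *handle)
{
	const struct scmi_sensor_info *info;
	u64 val;
	int ndom, ret;

	/* Sensor 0: enable cross-over notifications, arm trip point 0. */
	info = handle->sensor_ops->info_get(handle, 0);
	if (!info)
		return -ENODEV;

	ret = handle->sensor_ops->trip_point_notify(handle, 0, true);
	if (ret)
		return ret;

	if (info->num_trip_points > 0) {
		ret = handle->sensor_ops->trip_point_config(handle, 0, 0,
							    90000);
		if (ret)
			return ret;
	}

	/* reading_get() lost its "async" argument in this series. */
	ret = handle->sensor_ops->reading_get(handle, 0, &val);
	if (ret)
		return ret;

	/* rate_set() likewise lost its "config" argument. */
	ret = handle->clk_ops->rate_set(handle, 0, 100000000ULL);
	if (ret)
		return ret;

	/* New reset protocol: autonomous reset of every reported domain. */
	ndom = handle->reset_ops->num_domains_get(handle);
	while (ndom-- > 0) {
		ret = handle->reset_ops->reset(handle, ndom);
		if (ret)
			return ret;
	}

	return 0;
}

Per the kernel-doc in the hunk, assert()/deassert() are also available when a level-controlled reset line is needed instead of an autonomous reset cycle.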
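
For the mtk-cmdq.h changes, a caller-side sketch of the new argument order and narrower types. The subsys code, register offsets and event id are hypothetical placeholders (real values typically come from the SoC's GCE binding), and the packet is assumed to be created and flushed elsewhere with the existing cmdq_pkt_* helpers.

#include <linux/soc/mediatek/mtk-cmdq.h>

/* Sketch only: subsys code, offsets and the event id are placeholders. */
static int example_build_cmdq_pkt(struct cmdq_pkt *pkt)
{
	const u8  subsys = 0x14;
	const u16 ctrl_off = 0x0040;
	const u16 stat_off = 0x0044;
	const u16 vblank_event = 3;
	int ret;

	/* New order: subsys, offset, value (value used to come first). */
	ret = cmdq_pkt_write(pkt, subsys, ctrl_off, 0x1);
	if (ret)
		return ret;

	/* The masked write follows the same reordering. */
	ret = cmdq_pkt_write_mask(pkt, subsys, stat_off, 0x0, 0xffff);
	if (ret)
		return ret;

	/* Event ids are u16 now. */
	ret = cmdq_pkt_clear_event(pkt, vblank_event);
	if (ret)
		return ret;

	return cmdq_pkt_wfe(pkt, vblank_event);
}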
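
The new exynos-chipid.h header only defines offsets and masks; here is a hedged decode helper showing how they are meant to be combined. The raw PRO_ID/PKG_ID register values are assumed to have been read by the caller (for instance through a syscon regmap), and the field interpretation follows the macro names.

#include <linux/printk.h>
#include <linux/soc/samsung/exynos-chipid.h>
#include <linux/types.h>

/* Sketch only: the caller is assumed to have read the raw PRO_ID and
 * PKG_ID registers already.
 */
static void example_decode_chipid(u32 pro_id, u32 pkg_id)
{
	u32 product  = pro_id & EXYNOS_MASK;	   /* product number */
	u32 revision = pro_id & EXYNOS_REV_MASK;   /* main + sub revision */
	u32 ids = (pkg_id >> EXYNOS5422_IDS_OFFSET) & EXYNOS5422_IDS_MASK;
	u32 sg  = (pkg_id >> EXYNOS5422_SG_OFFSET) & EXYNOS5422_SG_MASK;

	pr_info("Exynos product %#x rev %#x, 5422 IDS %u SG %u\n",
		product, revision, ids, sg);
}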
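
For the ti_sci_protocol.h addition of exclusive device ops, a sketch of how a consumer might claim a device for itself. It assumes the handle comes from the usual ti_sci_get_handle()/devm_ti_sci_get_handle() helpers and is reached through handle->ops.dev_ops, neither of which appears in this hunk, and the device id is a placeholder.

#include <linux/soc/ti/ti_sci_protocol.h>

/* Sketch only: "id" is a hypothetical TISCI device index and the
 * handle->ops.dev_ops path mirrors how the other dev_ops are used.
 */
static int example_ti_sci_claim(const struct ti_sci_handle *handle, u32 id)
{
	const struct ti_sci_dev_ops *dops = &handle->ops.dev_ops;
	int ret;

	/* Power the device up and deny other hosts access to it. */
	ret = dops->get_device_exclusive(handle, id);
	if (ret)
		return ret;

	/* ...program the device... */

	/* Idle it while keeping exclusive ownership. */
	ret = dops->idle_device_exclusive(handle, id);
	if (ret)
		dops->put_device(handle, id);

	return ret;
}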
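
For the timerqueue.h conversion to rb_root_cached and the new queued/expires helpers, a short usage sketch. timerqueue_add()/timerqueue_del() are assumed to keep their existing prototypes (they are not part of this hunk), and the one-millisecond expiry is arbitrary.

#include <linux/ktime.h>
#include <linux/printk.h>
#include <linux/timerqueue.h>

/* Sketch only: exercises the helpers visible in the hunk above. */
static void example_timerqueue(void)
{
	struct timerqueue_head head;
	struct timerqueue_node node, *next;

	timerqueue_init_head(&head);	/* now just RB_ROOT_CACHED */
	timerqueue_init(&node);

	node.expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);
	timerqueue_add(&head, &node);

	if (timerqueue_node_queued(&node)) {
		/* Cached-leftmost lookup replaces the removed ->next pointer. */
		next = timerqueue_getnext(&head);
		if (timerqueue_node_expires(next))
			pr_info("earliest expiry: %lld\n", next->expires);
	}

	timerqueue_del(&head, &node);
}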