Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h  1
-rw-r--r--  include/linux/ata.h  10
-rw-r--r--  include/linux/blk-mq-rdma.h  10
-rw-r--r--  include/linux/blkdev.h  1
-rw-r--r--  include/linux/bsg-lib.h  2
-rw-r--r--  include/linux/ceph/osd_client.h  1
-rw-r--r--  include/linux/ceph/osdmap.h  2
-rw-r--r--  include/linux/ceph/rados.h  4
-rw-r--r--  include/linux/compat.h  18
-rw-r--r--  include/linux/compiler.h  6
-rw-r--r--  include/linux/cpuhotplug.h  2
-rw-r--r--  include/linux/cpuset.h  19
-rw-r--r--  include/linux/crush/crush.h  2
-rw-r--r--  include/linux/dax.h  1
-rw-r--r--  include/linux/device-mapper.h  41
-rw-r--r--  include/linux/device.h  2
-rw-r--r--  include/linux/devpts_fs.h  10
-rw-r--r--  include/linux/dma-fence.h  19
-rw-r--r--  include/linux/dma-mapping.h  40
-rw-r--r--  include/linux/fs.h  16
-rw-r--r--  include/linux/genalloc.h  5
-rw-r--r--  include/linux/i2c.h  3
-rw-r--r--  include/linux/iio/common/st_sensors.h  7
-rw-r--r--  include/linux/iio/iio.h  2
-rw-r--r--  include/linux/iio/trigger.h  4
-rw-r--r--  include/linux/init_task.h  8
-rw-r--r--  include/linux/interrupt.h  14
-rw-r--r--  include/linux/iommu.h  12
-rw-r--r--  include/linux/ipv6.h  6
-rw-r--r--  include/linux/irq.h  7
-rw-r--r--  include/linux/kthread.h  2
-rw-r--r--  include/linux/kvm_host.h  7
-rw-r--r--  include/linux/libata.h  2
-rw-r--r--  include/linux/memblock.h  6
-rw-r--r--  include/linux/memcontrol.h  10
-rw-r--r--  include/linux/mfd/da9052/da9052.h  6
-rw-r--r--  include/linux/mfd/da9052/reg.h  11
-rw-r--r--  include/linux/mlx4/device.h  13
-rw-r--r--  include/linux/mlx5/device.h  11
-rw-r--r--  include/linux/mlx5/driver.h  35
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h  119
-rw-r--r--  include/linux/mlx5/qp.h  4
-rw-r--r--  include/linux/mlx5/srq.h  5
-rw-r--r--  include/linux/mlx5/vport.h  3
-rw-r--r--  include/linux/mm.h  1
-rw-r--r--  include/linux/mm_types.h  66
-rw-r--r--  include/linux/mmu_notifier.h  25
-rw-r--r--  include/linux/mtd/nand.h  6
-rw-r--r--  include/linux/net.h  2
-rw-r--r--  include/linux/netdevice.h  2
-rw-r--r--  include/linux/nfs_xdr.h  2
-rw-r--r--  include/linux/nmi.h  8
-rw-r--r--  include/linux/nvme-fc-driver.h  7
-rw-r--r--  include/linux/nvme-fc.h  19
-rw-r--r--  include/linux/nvme.h  4
-rw-r--r--  include/linux/oom.h  22
-rw-r--r--  include/linux/pagemap.h  2
-rw-r--r--  include/linux/pci.h  5
-rw-r--r--  include/linux/perf/arm_pmu.h  4
-rw-r--r--  include/linux/perf_event.h  13
-rw-r--r--  include/linux/phy.h  2
-rw-r--r--  include/linux/pid.h  4
-rw-r--r--  include/linux/pinctrl/pinconf-generic.h  4
-rw-r--r--  include/linux/platform_data/hsmmc-omap.h  10
-rw-r--r--  include/linux/platform_data/omap_drm.h  53
-rw-r--r--  include/linux/platform_data/st_sensors_pdata.h  2
-rw-r--r--  include/linux/ptp_clock_kernel.h  20
-rw-r--r--  include/linux/ptr_ring.h  9
-rw-r--r--  include/linux/rcupdate.h  15
-rw-r--r--  include/linux/rcutiny.h  8
-rw-r--r--  include/linux/reservation.h  3
-rw-r--r--  include/linux/sched.h  56
-rw-r--r--  include/linux/skb_array.h  3
-rw-r--r--  include/linux/skbuff.h  41
-rw-r--r--  include/linux/spinlock.h  31
-rw-r--r--  include/linux/spinlock_up.h  6
-rw-r--r--  include/linux/srcutiny.h  13
-rw-r--r--  include/linux/srcutree.h  3
-rw-r--r--  include/linux/swait.h  55
-rw-r--r--  include/linux/sync_file.h  3
-rw-r--r--  include/linux/syscalls.h  13
-rw-r--r--  include/linux/trace_events.h  4
-rw-r--r--  include/linux/uaccess.h  2
-rw-r--r--  include/linux/uuid.h  14
-rw-r--r--  include/linux/vfio.h  4
-rw-r--r--  include/linux/wait.h  45
-rw-r--r--  include/linux/workqueue.h  4
87 files changed, 744 insertions, 380 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c749eef1daa1..27b4b6615263 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1209,6 +1209,7 @@ static inline bool acpi_has_watchdog(void) { return false; }
#endif
#ifdef CONFIG_ACPI_SPCR_TABLE
+extern bool qdf2400_e44_present;
int parse_spcr(bool earlycon);
#else
static inline int parse_spcr(bool earlycon) { return 0; }
diff --git a/include/linux/ata.h b/include/linux/ata.h
index e65ae4b2ed48..c7a353825450 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -60,7 +60,8 @@ enum {
ATA_ID_FW_REV = 23,
ATA_ID_PROD = 27,
ATA_ID_MAX_MULTSECT = 47,
- ATA_ID_DWORD_IO = 48,
+ ATA_ID_DWORD_IO = 48, /* before ATA-8 */
+ ATA_ID_TRUSTED = 48, /* ATA-8 and later */
ATA_ID_CAPABILITY = 49,
ATA_ID_OLD_PIO_MODES = 51,
ATA_ID_OLD_DMA_MODES = 52,
@@ -889,6 +890,13 @@ static inline bool ata_id_has_dword_io(const u16 *id)
return id[ATA_ID_DWORD_IO] & (1 << 0);
}
+static inline bool ata_id_has_trusted(const u16 *id)
+{
+ if (ata_id_major_version(id) <= 7)
+ return false;
+ return id[ATA_ID_TRUSTED] & (1 << 0);
+}
+
static inline bool ata_id_has_unload(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
new file mode 100644
index 000000000000..b4ade198007d
--- /dev/null
+++ b/include/linux/blk-mq-rdma.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_BLK_MQ_RDMA_H
+#define _LINUX_BLK_MQ_RDMA_H
+
+struct blk_mq_tag_set;
+struct ib_device;
+
+int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+ struct ib_device *dev, int first_vec);
+
+#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 25f6a0cb27d3..2a5d52fa90f5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -568,7 +568,6 @@ struct request_queue {
#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
- int bsg_job_size;
struct bsg_class_device bsg_dev;
#endif
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index e34dde2da0ef..637a20cfb237 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -24,6 +24,7 @@
#define _BLK_BSG_
#include <linux/blkdev.h>
+#include <scsi/scsi_request.h>
struct request;
struct device;
@@ -37,6 +38,7 @@ struct bsg_buffer {
};
struct bsg_job {
+ struct scsi_request sreq;
struct device *dev;
struct request *req;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index c6d96a5f46fd..adf670ecaf94 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -148,6 +148,7 @@ struct ceph_osd_request_target {
int size;
int min_size;
bool sort_bitwise;
+ bool recovery_deletes;
unsigned int flags; /* CEPH_OSD_FLAG_* */
bool paused;
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index a0996cb9faed..af3444a5bfdd 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -272,6 +272,8 @@ bool ceph_is_new_interval(const struct ceph_osds *old_acting,
u32 new_pg_num,
bool old_sort_bitwise,
bool new_sort_bitwise,
+ bool old_recovery_deletes,
+ bool new_recovery_deletes,
const struct ceph_pg *pgid);
bool ceph_osds_changed(const struct ceph_osds *old_acting,
const struct ceph_osds *new_acting,
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 385db08bb8b2..b8281feda9c7 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -158,6 +158,10 @@ extern const char *ceph_osd_state_name(int s);
#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */
#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */
#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */
+#define CEPH_OSDMAP_REQUIRE_JEWEL (1<<16) /* require jewel for booting osds */
+#define CEPH_OSDMAP_REQUIRE_KRAKEN (1<<17) /* require kraken for booting osds */
+#define CEPH_OSDMAP_REQUIRE_LUMINOUS (1<<18) /* require l for booting osds */
+#define CEPH_OSDMAP_RECOVERY_DELETES (1<<19) /* deletes performed during recovery instead of peering */
/*
* The error code to return when an OSD can't handle a write
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 5a6a109b4a50..3fc433303d7a 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -27,7 +27,7 @@
#endif
#ifndef __SC_DELOUSE
-#define __SC_DELOUSE(t,v) ((t)(unsigned long)(v))
+#define __SC_DELOUSE(t,v) ((__force t)(unsigned long)(v))
#endif
#define COMPAT_SYSCALL_DEFINE0(name) \
@@ -365,10 +365,10 @@ asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
compat_ulong_t vlen, u32 pos_low, u32 pos_high);
asmlinkage ssize_t compat_sys_preadv2(compat_ulong_t fd,
const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
asmlinkage ssize_t compat_sys_pwritev2(compat_ulong_t fd,
const struct compat_iovec __user *vec,
- compat_ulong_t vlen, u32 pos_low, u32 pos_high, int flags);
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high, rwf_t flags);
#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
asmlinkage long compat_sys_preadv64(unsigned long fd,
@@ -382,6 +382,18 @@ asmlinkage long compat_sys_pwritev64(unsigned long fd,
unsigned long vlen, loff_t pos);
#endif
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
+asmlinkage long compat_sys_readv64v2(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos, rwf_t flags);
+#endif
+
asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index eca8ad75e28b..043b60de041e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -517,7 +517,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __compiletime_error_fallback(condition) do { } while (0)
#endif
-#define __compiletime_assert(condition, msg, prefix, suffix) \
+#ifdef __OPTIMIZE__
+# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
bool __cond = !(condition); \
extern void prefix ## suffix(void) __compiletime_error(msg); \
@@ -525,6 +526,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
prefix ## suffix(); \
__compiletime_error_fallback(__cond); \
} while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+#endif
#define _compiletime_assert(condition, msg, prefix, suffix) \
__compiletime_assert(condition, msg, prefix, suffix)
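Note: gating __compiletime_assert() on __OPTIMIZE__ reflects how the trick works: it relies on the optimizer proving the condition false and eliminating the call to the __compiletime_error()-marked function. At -O0 that call always survives and breaks the build even for true assertions, so the check is compiled out instead.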
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index b56573bf440d..82b30e638430 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -39,8 +39,6 @@ enum cpuhp_state {
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
- CPUHP_SCSI_BNX2FC_DEAD,
- CPUHP_SCSI_BNX2I_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 119a3f9604b0..898cfe2eeb42 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -18,6 +18,19 @@
#ifdef CONFIG_CPUSETS
+/*
+ * Static branch rewrites can happen in an arbitrary order for a given
+ * key. In code paths where we need to loop with read_mems_allowed_begin() and
+ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
+ * to ensure that begin() always gets rewritten before retry() in the
+ * disabled -> enabled transition. If not, then if local irqs are disabled
+ * around the loop, we can deadlock since retry() would always be
+ * comparing the latest value of the mems_allowed seqcount against 0 as
+ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
+ * transition should happen in reverse order for the same reasons (want to stop
+ * looking at real value of mems_allowed.sequence in retry() first).
+ */
+extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
@@ -32,12 +45,14 @@ static inline int nr_cpusets(void)
static inline void cpuset_inc(void)
{
+ static_branch_inc(&cpusets_pre_enable_key);
static_branch_inc(&cpusets_enabled_key);
}
static inline void cpuset_dec(void)
{
static_branch_dec(&cpusets_enabled_key);
+ static_branch_dec(&cpusets_pre_enable_key);
}
extern int cpuset_init(void);
@@ -115,7 +130,7 @@ extern void cpuset_print_current_mems_allowed(void);
*/
static inline unsigned int read_mems_allowed_begin(void)
{
- if (!cpusets_enabled())
+ if (!static_branch_unlikely(&cpusets_pre_enable_key))
return 0;
return read_seqcount_begin(&current->mems_allowed_seq);
@@ -129,7 +144,7 @@ static inline unsigned int read_mems_allowed_begin(void)
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
- if (!cpusets_enabled())
+ if (!static_branch_unlikely(&cpusets_enabled_key))
return false;
return read_seqcount_retry(&current->mems_allowed_seq, seq);
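Note: a minimal sketch of the begin/retry loop that the ordering comment above protects (a hypothetical caller, not a verbatim kernel call site):

/* Snapshot mems_allowed consistently. If retry() were rewritten to the
 * enabled key before begin() during the disabled -> enabled transition,
 * this loop could spin forever with IRQs disabled, as described above. */
static nodemask_t snapshot_mems_allowed(void)
{
	nodemask_t mask;
	unsigned int seq;

	do {
		seq = read_mems_allowed_begin();
		mask = current->mems_allowed;
	} while (read_mems_allowed_retry(seq));

	return mask;
}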
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 92e165d417a6..07eed95e10c7 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -193,7 +193,7 @@ struct crush_choose_arg {
struct crush_choose_arg_map {
#ifdef __KERNEL__
struct rb_node node;
- u64 choose_args_index;
+ s64 choose_args_index;
#endif
struct crush_choose_arg *args; /*!< replacement for each bucket
in the crushmap */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 794811875732..df97b7af7e2c 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -87,6 +87,7 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t size);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
+bool dax_write_cache_enabled(struct dax_device *dax_dev);
/*
* We use lowest available bit in exceptional entry for locking, one bit for
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 1473455d0341..4f2b3b2076c4 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -549,46 +549,29 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
*---------------------------------------------------------------*/
#define DM_NAME "device-mapper"
-#ifdef CONFIG_PRINTK
-extern struct ratelimit_state dm_ratelimit_state;
-
-#define dm_ratelimit() __ratelimit(&dm_ratelimit_state)
-#else
-#define dm_ratelimit() 0
-#endif
+#define DM_RATELIMIT(pr_func, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ \
+ if (__ratelimit(&rs)) \
+ pr_func(DM_FMT(fmt), ##__VA_ARGS__); \
+} while (0)
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMERR_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMERR(fmt, ##__VA_ARGS__); \
-} while (0)
-
+#define DMERR_LIMIT(fmt, ...) DM_RATELIMIT(pr_err, fmt, ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMWARN_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMWARN(fmt, ##__VA_ARGS__); \
-} while (0)
-
+#define DMWARN_LIMIT(fmt, ...) DM_RATELIMIT(pr_warn, fmt, ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMINFO_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMINFO(fmt, ##__VA_ARGS__); \
-} while (0)
+#define DMINFO_LIMIT(fmt, ...) DM_RATELIMIT(pr_info, fmt, ##__VA_ARGS__)
#ifdef CONFIG_DM_DEBUG
#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
-#define DMDEBUG_LIMIT(fmt, ...) \
-do { \
- if (dm_ratelimit()) \
- DMDEBUG(fmt, ##__VA_ARGS__); \
-} while (0)
+#define DMDEBUG_LIMIT(fmt, ...) DM_RATELIMIT(pr_debug, fmt, ##__VA_ARGS__)
#else
#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
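Note: usage of the *_LIMIT macros is unchanged, but each call site now gets its own DEFINE_RATELIMIT_STATE instead of sharing the removed global dm_ratelimit_state, so one noisy message no longer suppresses the others. A minimal sketch (hypothetical target code; DM_MSG_PREFIX is whatever the target defines):

#define DM_MSG_PREFIX "example"	/* hypothetical target name */

static void example_report(int error)
{
	/* Rate-limited independently of every other *_LIMIT call site. */
	DMERR_LIMIT("I/O error %d", error);
}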
diff --git a/include/linux/device.h b/include/linux/device.h
index 723cd54b94da..beabdbc08420 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -843,7 +843,7 @@ struct dev_links_info {
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
* @pins: For device pin management.
- * See Documentation/pinctrl.txt for details.
+ * See Documentation/driver-api/pinctl.rst for details.
* @msi_list: Hosts MSI descriptors
* @msi_domain: The generic MSI domain this device is using.
* @numa_node: NUMA node this device is close to.
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 277ab9af9ac2..100cb4343763 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -19,6 +19,7 @@
struct pts_fs_info;
+struct vfsmount *devpts_mntget(struct file *, struct pts_fs_info *);
struct pts_fs_info *devpts_acquire(struct file *);
void devpts_release(struct pts_fs_info *);
@@ -32,6 +33,15 @@ void *devpts_get_priv(struct dentry *);
/* unlink */
void devpts_pty_kill(struct dentry *);
+/* in pty.c */
+int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags);
+
+#else
+static inline int
+ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
+{
+ return -EIO;
+}
#endif
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 0a186c4f3981..171895072435 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -338,6 +338,19 @@ dma_fence_is_signaled(struct dma_fence *fence)
}
/**
+ * __dma_fence_is_later - return if f1 is chronologically later than f2
+ * @f1: [in] the first fence's seqno
+ * @f2: [in] the second fence's seqno from the same context
+ *
+ * Returns true if f1 is chronologically later than f2. Both fences must be
+ * from the same context, since a seqno is not common across contexts.
+ */
+static inline bool __dma_fence_is_later(u32 f1, u32 f2)
+{
+ return (int)(f1 - f2) > 0;
+}
+
+/**
* dma_fence_is_later - return if f1 is chronologically later than f2
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
@@ -351,7 +364,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
if (WARN_ON(f1->context != f2->context))
return false;
- return (int)(f1->seqno - f2->seqno) > 0;
+ return __dma_fence_is_later(f1->seqno, f2->seqno);
}
/**
@@ -418,8 +431,8 @@ int dma_fence_get_status(struct dma_fence *fence);
static inline void dma_fence_set_error(struct dma_fence *fence,
int error)
{
- BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
- BUG_ON(error >= 0 || error < -MAX_ERRNO);
+ WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+ WARN_ON(error >= 0 || error < -MAX_ERRNO);
fence->error = error;
}
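Note: __dma_fence_is_later() keeps the wraparound-safe signed comparison the old code used. A standalone illustration of why (int)(f1 - f2) > 0 still orders seqnos across a 32-bit wrap (plain userspace C, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t older = UINT32_MAX - 1;	/* issued just before the wrap */
	uint32_t newer = 2;			/* issued just after the wrap */

	/* Unsigned subtraction yields a small positive delta (4 here),
	 * so the signed test still says "newer is later". */
	assert((int32_t)(newer - older) > 0);
	assert((int32_t)(older - newer) < 0);
	return 0;
}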
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 843ab866e0f4..03c0196a6f24 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -157,16 +157,40 @@ static inline int is_device_dma_capable(struct device *dev)
* These three functions are only for dma allocator.
* Don't use them in device drivers.
*/
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
+
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+ size_t size, int *ret);
+
#else
-#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+
+static inline void *dma_alloc_from_global_coherent(ssize_t size,
+ dma_addr_t *dma_handle)
+{
+ return NULL;
+}
+
+static inline int dma_release_from_global_coherent(int order, void *vaddr)
+{
+ return 0;
+}
+
+static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
+ void *cpu_addr, size_t size,
+ int *ret)
+{
+ return 0;
+}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
#ifdef CONFIG_HAS_DMA
@@ -481,7 +505,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
BUG_ON(!ops);
- if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+ if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
return cpu_addr;
if (!arch_dma_alloc_attrs(&dev, &flag))
@@ -503,7 +527,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
BUG_ON(!ops);
WARN_ON(irqs_disabled());
- if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
return;
if (!ops->free || !cpu_addr)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 6e1fd5d21248..2625fc47c7e5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -72,6 +72,8 @@ extern int leases_enable, lease_break_time;
extern int sysctl_protected_symlinks;
extern int sysctl_protected_hardlinks;
+typedef __kernel_rwf_t rwf_t;
+
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
@@ -907,9 +909,9 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
+#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif
#define FL_POSIX 1
@@ -1758,9 +1760,9 @@ extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *)
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, int);
+ unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
- unsigned long, loff_t *, int);
+ unsigned long, loff_t *, rwf_t);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
@@ -2874,9 +2876,9 @@ extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
- int flags);
+ rwf_t flags);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
- int flags);
+ rwf_t flags);
/* fs/block_dev.c */
extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
@@ -3143,7 +3145,7 @@ static inline int iocb_flags(struct file *file)
return res;
}
-static inline int kiocb_set_rw_flags(struct kiocb *ki, int flags)
+static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
{
if (unlikely(flags & ~RWF_SUPPORTED))
return -EOPNOTSUPP;
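Note: for scale, with 4 KiB pages on a 32-bit build the old limit (((loff_t)PAGE_SIZE << 31) - 1) works out to 2^43 - 1, just under 8 TiB, while the new (loff_t)ULONG_MAX << PAGE_SHIFT is 2^44 - 4096, just under 16 TiB, which is roughly the full range a 32-bit page-cache index can actually address.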
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 29d4385903d4..6dfec4d638df 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -38,12 +38,13 @@ struct device_node;
struct gen_pool;
/**
- * Allocation callback function type definition
+ * typedef genpool_algo_t: Allocation callback function type definition
* @map: Pointer to bitmap
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
- * @data: optional additional data used by @genpool_algo_t
+ * @data: optional additional data used by the callback
+ * @pool: the pool being allocated from
*/
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
unsigned long size,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 00ca5b86a753..d501d3956f13 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -689,7 +689,8 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* Memory modules */
-#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */
+/* Warn users that the adapter doesn't support classes anymore */
+#define I2C_CLASS_DEPRECATED (1<<8)
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 497f2b3a5a62..97f1b465d04f 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -105,6 +105,11 @@ struct st_sensor_fullscale {
struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
};
+struct st_sensor_sim {
+ u8 addr;
+ u8 value;
+};
+
/**
* struct st_sensor_bdu - ST sensor device block data update
* @addr: address of the register.
@@ -197,6 +202,7 @@ struct st_sensor_transfer_function {
* @bdu: Block data update register.
* @das: Data Alignment Selection register.
* @drdy_irq: Data ready register of the sensor.
+ * @sim: SPI serial interface mode register of the sensor.
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when sensor passing from power-down to power-up.
*/
@@ -213,6 +219,7 @@ struct st_sensor_settings {
struct st_sensor_bdu bdu;
struct st_sensor_das das;
struct st_sensor_data_ready_irq drdy_irq;
+ struct st_sensor_sim sim;
bool multi_read_bit;
unsigned int bootime;
};
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index d68bec297a45..c380daa40c0e 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -535,7 +535,7 @@ struct iio_buffer_setup_ops {
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
* @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
- * @trig_readonly [INTERN] mark the current trigger immutable
+ * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index ea08302f2d7b..7142d8d6e470 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -144,8 +144,8 @@ void devm_iio_trigger_unregister(struct device *dev,
/**
* iio_trigger_set_immutable() - set an immutable trigger on destination
*
- * @indio_dev - IIO device structure containing the device
- * @trig - trigger to assign to device
+ * @indio_dev: IIO device structure containing the device
+ * @trig: trigger to assign to device
*
**/
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a2f6707e9fc0..0e849715e5be 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -126,17 +126,11 @@ extern struct group_info init_groups;
#endif
#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_TREE_PREEMPT() \
- .rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_TREE_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_PREEMPT_RCU
#define INIT_TASK_RCU_PREEMPT(tsk) \
.rcu_read_lock_nesting = 0, \
.rcu_read_unlock_special.s = 0, \
.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
- INIT_TASK_RCU_TREE_PREEMPT()
+ .rcu_blocked_node = NULL,
#else
#define INIT_TASK_RCU_PREEMPT(tsk)
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a2fddddb0d60..59ba11661b6e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -18,6 +18,7 @@
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
+#include <asm/sections.h>
/*
* These correspond to the IORESOURCE_IRQ_* defines in
@@ -726,7 +727,6 @@ extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
-#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
/*
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
@@ -734,16 +734,4 @@ extern int arch_early_irq_init(void);
#define __softirq_entry \
__attribute__((__section__(".softirqentry.text")))
-/* Limits of hardirq entrypoints */
-extern char __irqentry_text_start[];
-extern char __irqentry_text_end[];
-/* Limits of softirq entrypoints */
-extern char __softirqentry_text_start[];
-extern char __softirqentry_text_end[];
-
-#else
-#define __irq_entry
-#define __softirq_entry
-#endif
-
#endif
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2cb54adc4a33..176f7569d874 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -240,7 +240,7 @@ struct iommu_device {
struct list_head list;
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
- struct device dev;
+ struct device *dev;
};
int iommu_device_register(struct iommu_device *iommu);
@@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
iommu->fwnode = fwnode;
}
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
+{
+ return (struct iommu_device *)dev_get_drvdata(dev);
+}
+
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
{
}
+static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
+{
+ return NULL;
+}
+
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index e1b442996f81..474d6bbc158c 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -128,6 +128,7 @@ struct inet6_skb_parm {
#define IP6SKB_FRAGMENTED 16
#define IP6SKB_HOPBYHOP 32
#define IP6SKB_L3SLAVE 64
+#define IP6SKB_JUMBOGRAM 128
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -152,6 +153,11 @@ static inline int inet6_iif(const struct sk_buff *skb)
return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}
+static inline bool inet6_is_jumbogram(const struct sk_buff *skb)
+{
+ return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM);
+}
+
/* can not be used in TCP layer after tcp_v6_fill_cb */
static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
{
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 00db35b61e9e..d2d543794093 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -388,7 +388,12 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_mask_ack: ack and mask an interrupt source
* @irq_unmask: unmask an interrupt source
* @irq_eoi: end of interrupt
- * @irq_set_affinity: set the CPU affinity on SMP machines
+ * @irq_set_affinity: Set the CPU affinity on SMP machines. If the force
+ * argument is true, it tells the driver to
+ * unconditionally apply the affinity setting. Sanity
+ * checks against the supplied affinity mask are not
+ * required. This is used for CPU hotplug where the
+ * target CPU is not yet set in the cpu_online_mask.
* @irq_retrigger: resend an IRQ to the CPU
* @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @irq_set_wake: enable/disable power-management wake-on of an IRQ
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 4fec8b775895..82e197eeac91 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -15,7 +15,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
* @threadfn: the function to run in the thread
* @data: data pointer for @threadfn()
* @namefmt: printf-style format string for the thread name
- * @...: arguments for @namefmt.
+ * @arg...: arguments for @namefmt.
*
* This macro will create a kthread on the current node, leaving it in
* the stopped state. This is just a helper for kthread_create_on_node();
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 648b34cabb38..21a6fd6c44af 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -445,6 +445,7 @@ struct kvm {
struct kvm_stat_data **debugfs_stat_data;
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
+ pid_t userspace_pid;
};
#define kvm_err(fmt, ...) \
@@ -476,7 +477,8 @@ struct kvm {
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
- lockdep_is_held(&kvm->slots_lock));
+ lockdep_is_held(&kvm->slots_lock) ||
+ !refcount_read(&kvm->users_count));
}
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
@@ -569,7 +571,8 @@ void kvm_put_kvm(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
- lockdep_is_held(&kvm->slots_lock));
+ lockdep_is_held(&kvm->slots_lock) ||
+ !refcount_read(&kvm->users_count));
}
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 55de3da58b1c..931c32f1f18d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -435,7 +435,7 @@ enum {
ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
- ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
+ ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 77d427974f57..bae11c7e7bf3 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -61,6 +61,7 @@ extern int memblock_debug;
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
+void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
@@ -74,8 +75,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
@@ -110,6 +109,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
phys_addr_t *out_end);
+void __memblock_free_early(phys_addr_t base, phys_addr_t size);
+void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+
/**
* for_each_mem_range - iterate through memblock areas from type_a and not
* included in type_b. Or just type_a if type_b is NULL.
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 3914e3dd6168..9b15a4bcfa77 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -484,7 +484,8 @@ bool mem_cgroup_oom_synchronize(bool wait);
extern int do_swap_account;
#endif
-void lock_page_memcg(struct page *page);
+struct mem_cgroup *lock_page_memcg(struct page *page);
+void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
@@ -809,7 +810,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}
-static inline void lock_page_memcg(struct page *page)
+static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}
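Note: a minimal sketch of the new pairing (hypothetical caller): lock_page_memcg() now hands back the memcg, so the critical section can be closed with __unlock_page_memcg() even if page->mem_cgroup changes in between:

static void example_update_page_state(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = lock_page_memcg(page);
	/* ... update per-memcg page state; page->mem_cgroup may be
	 * cleared here without invalidating the unlock below ... */
	__unlock_page_memcg(memcg);
}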
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index ce9230af09c2..ae5b663836d0 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -45,6 +45,12 @@
#define DA9052_ADC_TJUNC 8
#define DA9052_ADC_VBBAT 9
+/* TSI channel has its own 4 channel mux */
+#define DA9052_ADC_TSI_XP 70
+#define DA9052_ADC_TSI_XN 71
+#define DA9052_ADC_TSI_YP 72
+#define DA9052_ADC_TSI_YN 73
+
#define DA9052_IRQ_DCIN 0
#define DA9052_IRQ_VBUS 1
#define DA9052_IRQ_DCINREM 2
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h
index 5010f978725c..76780ea8849c 100644
--- a/include/linux/mfd/da9052/reg.h
+++ b/include/linux/mfd/da9052/reg.h
@@ -690,7 +690,10 @@
/* TSI CONTROL REGISTER B BITS */
#define DA9052_TSICONTB_ADCREF 0X80
#define DA9052_TSICONTB_TSIMAN 0X40
-#define DA9052_TSICONTB_TSIMUX 0X30
+#define DA9052_TSICONTB_TSIMUX_XP 0X00
+#define DA9052_TSICONTB_TSIMUX_YP 0X10
+#define DA9052_TSICONTB_TSIMUX_XN 0X20
+#define DA9052_TSICONTB_TSIMUX_YN 0X30
#define DA9052_TSICONTB_TSISEL3 0X08
#define DA9052_TSICONTB_TSISEL2 0X04
#define DA9052_TSICONTB_TSISEL1 0X02
@@ -705,8 +708,14 @@
/* TSI CO-ORDINATE LSB RESULT REGISTER BITS */
#define DA9052_TSILSB_PENDOWN 0X40
#define DA9052_TSILSB_TSIZL 0X30
+#define DA9052_TSILSB_TSIZL_SHIFT 4
+#define DA9052_TSILSB_TSIZL_BITS 2
#define DA9052_TSILSB_TSIYL 0X0C
+#define DA9052_TSILSB_TSIYL_SHIFT 2
+#define DA9052_TSILSB_TSIYL_BITS 2
#define DA9052_TSILSB_TSIXL 0X03
+#define DA9052_TSILSB_TSIXL_SHIFT 0
+#define DA9052_TSILSB_TSIXL_BITS 2
/* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */
#define DA9052_TSIZMSB_TSIZM 0XFF
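Note: a hypothetical helper showing how the new *_SHIFT macros pair with the existing masks to pull one 2-bit coordinate field out of the TSI co-ordinate LSB result register (a sketch, not the actual driver code):

static inline u8 da9052_tsi_xl(u8 lsb)
{
	return (lsb & DA9052_TSILSB_TSIXL) >> DA9052_TSILSB_TSIXL_SHIFT;
}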
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index aad5d81dfb44..c8a63e148a98 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -428,6 +428,12 @@ enum mlx4_steer_type {
MLX4_NUM_STEERS
};
+enum mlx4_resource_usage {
+ MLX4_RES_USAGE_NONE,
+ MLX4_RES_USAGE_DRIVER,
+ MLX4_RES_USAGE_USER_VERBS,
+};
+
enum {
MLX4_NUM_FEXCH = 64 * 1024,
};
@@ -620,6 +626,7 @@ struct mlx4_caps {
u32 dmfs_high_rate_qpn_base;
u32 dmfs_high_rate_qpn_range;
u32 vf_caps;
+ bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
};
@@ -748,6 +755,7 @@ struct mlx4_cq {
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
+ u8 usage;
};
struct mlx4_qp {
@@ -757,6 +765,7 @@ struct mlx4_qp {
atomic_t refcount;
struct completion free;
+ u8 usage;
};
struct mlx4_srq {
@@ -1120,7 +1129,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
unsigned vector, int collapsed, int timestamp_en);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
- int *base, u8 flags);
+ int *base, u8 flags, u8 usage);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
@@ -1417,7 +1426,7 @@ int mlx4_get_phys_port_id(struct mlx4_dev *dev);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
-int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index f31a0b5377e1..c13d71deaeca 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -290,6 +290,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+ MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -305,6 +306,10 @@ enum mlx5_event {
};
enum {
+ MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
+};
+
+enum {
MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
@@ -968,7 +973,7 @@ enum mlx5_cap_type {
MLX5_CAP_ATOMIC,
MLX5_CAP_ROCE,
MLX5_CAP_IPOIB_OFFLOADS,
- MLX5_CAP_EOIB_OFFLOADS,
+ MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
@@ -1011,6 +1016,10 @@ enum mlx5_mcam_feature_groups {
MLX5_GET(per_protocol_networking_offload_caps,\
mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
+
#define MLX5_CAP_ROCE(mdev, cap) \
MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index df6ce59a1f95..b3fc9d586a9f 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -162,6 +162,13 @@ enum dbg_rsc_type {
MLX5_DBG_RSC_CQ,
};
+enum port_state_policy {
+ MLX5_POLICY_DOWN = 0,
+ MLX5_POLICY_UP = 1,
+ MLX5_POLICY_FOLLOW = 2,
+ MLX5_POLICY_INVALID = 0xffffffff
+};
+
struct mlx5_field_desc {
struct dentry *dent;
int i;
@@ -185,6 +192,7 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
MLX5_DEV_EVENT_PPS,
+ MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
};
enum mlx5_port_status {
@@ -291,7 +299,7 @@ struct mlx5_cmd {
struct semaphore pages_sem;
int mode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
- struct pci_pool *pool;
+ struct dma_pool *pool;
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
@@ -410,6 +418,7 @@ enum mlx5_res_type {
MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
+ MLX5_RES_XRQ = 5,
};
struct mlx5_core_rsc_common {
@@ -525,6 +534,9 @@ struct mlx5_mkey_table {
struct mlx5_vf_context {
int enabled;
+ u64 port_guid;
+ u64 node_guid;
+ enum port_state_policy policy;
};
struct mlx5_core_sriov {
@@ -534,7 +546,6 @@ struct mlx5_core_sriov {
};
struct mlx5_irq_info {
- cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
};
@@ -597,7 +608,6 @@ struct mlx5_port_module_event_stats {
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
- struct msix_entry *msix_arr;
struct mlx5_irq_info *irq_info;
/* pages stuff */
@@ -673,9 +683,7 @@ enum mlx5_device_state {
};
enum mlx5_interface_state {
- MLX5_INTERFACE_STATE_DOWN = BIT(0),
- MLX5_INTERFACE_STATE_UP = BIT(1),
- MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
+ MLX5_INTERFACE_STATE_UP = BIT(0),
};
enum mlx5_pci_status {
@@ -842,13 +850,6 @@ struct mlx5_pas {
u8 log_sz;
};
-enum port_state_policy {
- MLX5_POLICY_DOWN = 0,
- MLX5_POLICY_UP = 1,
- MLX5_POLICY_FOLLOW = 2,
- MLX5_POLICY_INVALID = 0xffffffff
-};
-
enum phy_port_state {
MLX5_AAA_111
};
@@ -1091,7 +1092,7 @@ enum {
};
enum {
- MAX_UMR_CACHE_ENTRY = 20,
+ MR_CACHE_LAST_STD_ENTRY = 20,
MLX5_IMR_MTT_CACHE_ENTRY,
MLX5_IMR_KSM_CACHE_ENTRY,
MAX_MR_CACHE_ENTRIES
@@ -1185,4 +1186,10 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
+static inline const struct cpumask *
+mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
+{
+ return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector);
+}
+
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 87869c04849a..b7338a21c780 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -200,6 +200,7 @@ enum {
MLX5_CMD_OP_QUERY_SQ = 0x907,
MLX5_CMD_OP_CREATE_RQ = 0x908,
MLX5_CMD_OP_MODIFY_RQ = 0x909,
+ MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910,
MLX5_CMD_OP_DESTROY_RQ = 0x90a,
MLX5_CMD_OP_QUERY_RQ = 0x90b,
MLX5_CMD_OP_CREATE_RMP = 0x90c,
@@ -294,8 +295,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_dport[0x1];
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
+ u8 reserved_at_40[0x1a];
+ u8 bth_dst_qp[0x1];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_5b[0x25];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -431,7 +434,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
- u8 reserved_at_120[0xe0];
+ u8 reserved_at_120[0x28];
+ u8 bth_dst_qp[0x18];
+ u8 reserved_at_160[0xa0];
};
struct mlx5_ifc_cmd_pas_bits {
@@ -599,7 +604,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 rss_ind_tbl_cap[0x4];
u8 reg_umr_sq[0x1];
u8 scatter_fcs[0x1];
- u8 reserved_at_1a[0x1];
+ u8 enhanced_multi_pkt_send_wqe[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
u8 reserved_at_1c[0x2];
u8 tunnel_statless_gre[0x1];
@@ -840,7 +845,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 retransmission_q_counters[0x1];
u8 reserved_at_183[0x1];
u8 modify_rq_counter_set_id[0x1];
- u8 reserved_at_185[0x1];
+ u8 rq_delay_drop[0x1];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@@ -857,7 +862,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
u8 port_module_event[0x1];
- u8 reserved_at_1b1[0x1];
+ u8 enhanced_error_q_counters[0x1];
u8 ports_check[0x1];
u8 reserved_at_1b3[0x1];
u8 disable_link_up[0x1];
@@ -873,7 +878,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_tc[0x4];
u8 reserved_at_1d0[0x1];
u8 dcbx[0x1];
- u8 reserved_at_1d2[0x3];
+ u8 general_notification_event[0x1];
+ u8 reserved_at_1d3[0x2];
u8 fpga[0x1];
u8 rol_s[0x1];
u8 rol_g[0x1];
@@ -1016,7 +1022,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
- u8 reserved_at_3e1[0xa];
+ u8 disable_local_lb[0x1];
+ u8 reserved_at_3e2[0x9];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
u8 log_max_current_mc_list[0x5];
@@ -1187,7 +1194,8 @@ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
u8 reserved_at_c0[0x12];
u8 cnp_dscp[0x6];
- u8 reserved_at_d8[0x5];
+ u8 reserved_at_d8[0x4];
+ u8 cnp_prio_mode[0x1];
u8 cnp_802p_prio[0x3];
u8 reserved_at_e0[0x720];
@@ -2015,6 +2023,10 @@ enum {
};
enum {
+ MLX5_QPC_OFFLOAD_TYPE_RNDV = 0x1,
+};
+
+enum {
MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
};
@@ -2057,7 +2069,8 @@ struct mlx5_ifc_qpc_bits {
u8 st[0x8];
u8 reserved_at_10[0x3];
u8 pm_state[0x2];
- u8 reserved_at_15[0x7];
+ u8 reserved_at_15[0x3];
+ u8 offload_type[0x4];
u8 end_padding_mode[0x2];
u8 reserved_at_1e[0x2];
@@ -2437,7 +2450,7 @@ struct mlx5_ifc_sqc_bits {
u8 cd_master[0x1];
u8 fre[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_at_4[0x1];
+ u8 allow_multi_pkt_send_wqe[0x1];
u8 min_wqe_inline_mode[0x3];
u8 state[0x4];
u8 reg_umr[0x1];
@@ -2515,7 +2528,7 @@ enum {
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
- u8 reserved_at_1[0x1];
+ u8 delay_drop_en[0x1];
u8 scatter_fcs[0x1];
u8 vsd[0x1];
u8 mem_rq_type[0x4];
@@ -2562,7 +2575,9 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_0[0x5];
u8 min_wqe_inline_mode[0x3];
- u8 reserved_at_8[0x17];
+ u8 reserved_at_8[0x15];
+ u8 disable_mc_local_lb[0x1];
+ u8 disable_uc_local_lb[0x1];
u8 roce_en[0x1];
u8 arm_change_event[0x1];
@@ -3000,7 +3015,7 @@ struct mlx5_ifc_xrqc_bits {
struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
- u8 reserved_at_180[0x880];
+ u8 reserved_at_180[0x280];
struct mlx5_ifc_wq_bits wq;
};
@@ -3947,7 +3962,47 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 local_ack_timeout_err[0x20];
- u8 reserved_at_320[0x4e0];
+ u8 reserved_at_320[0xa0];
+
+ u8 resp_local_length_error[0x20];
+
+ u8 req_local_length_error[0x20];
+
+ u8 resp_local_qp_error[0x20];
+
+ u8 local_operation_error[0x20];
+
+ u8 resp_local_protection[0x20];
+
+ u8 req_local_protection[0x20];
+
+ u8 resp_cqe_error[0x20];
+
+ u8 req_cqe_error[0x20];
+
+ u8 req_mw_binding[0x20];
+
+ u8 req_bad_response[0x20];
+
+ u8 req_remote_invalid_request[0x20];
+
+ u8 resp_remote_invalid_request[0x20];
+
+ u8 req_remote_access_errors[0x20];
+
+ u8 resp_remote_access_errors[0x20];
+
+ u8 req_remote_operation_errors[0x20];
+
+ u8 req_transport_retries_exceeded[0x20];
+
+ u8 cq_overflow[0x20];
+
+ u8 resp_cqe_flush_error[0x20];
+
+ u8 req_cqe_flush_error[0x20];
+
+ u8 reserved_at_620[0x1e0];
};
struct mlx5_ifc_query_q_counter_in_bits {
@@ -5229,7 +5284,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x16];
+ u8 reserved_at_0[0x14];
+ u8 disable_uc_local_lb[0x1];
+ u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
u8 port_guid[0x1];
u8 min_inline[0x1];
@@ -5847,6 +5904,28 @@ struct mlx5_ifc_destroy_rq_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_set_delay_drop_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 delay_drop_timeout[0x10];
+};
+
+struct mlx5_ifc_set_delay_drop_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_destroy_rmp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7749,8 +7828,10 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x7f];
+ u8 reserved_at_0[0x7d];
+ u8 mtpps_enh_out_per_adj[0x1];
+ u8 mtpps_fs[0x1];
u8 pcie_performance_group[0x1];
};
@@ -8159,7 +8240,8 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_78[0x4];
u8 cap_pin_4_mode[0x4];
- u8 reserved_at_80[0x80];
+ u8 field_select[0x20];
+ u8 reserved_at_a0[0x60];
u8 enable[0x1];
u8 reserved_at_101[0xb];
@@ -8174,8 +8256,9 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 out_pulse_duration[0x10];
u8 out_periodic_adjustment[0x10];
+ u8 enhanced_out_periodic_adjustment[0x20];
- u8 reserved_at_1a0[0x60];
+ u8 reserved_at_1c0[0x20];
};
struct mlx5_ifc_mtppse_reg_bits {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 6f41270d80c0..66d19b611fe4 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -212,7 +212,6 @@ struct mlx5_wqe_ctrl_seg {
#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
-#define MLX5_WQE_AV_EXT 0x80000000
enum {
MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
@@ -561,6 +560,9 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
+int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
+ u32 timeout_usec);
+
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
index 1cde0fd53f90..24ff23e27c8a 100644
--- a/include/linux/mlx5/srq.h
+++ b/include/linux/mlx5/srq.h
@@ -38,6 +38,7 @@
enum {
MLX5_SRQ_FLAG_ERR = (1 << 0),
MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
+ MLX5_SRQ_FLAG_RNDV = (1 << 2),
};
struct mlx5_srq_attr {
@@ -56,6 +57,10 @@ struct mlx5_srq_attr {
u32 user_index;
u64 db_record;
__be64 *pas;
+ u32 tm_log_list_size;
+ u32 tm_next_tag;
+ u32 tm_hw_phase_cnt;
+ u32 tm_sw_phase_cnt;
};
struct mlx5_core_dev;
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 656c70b65dd2..aaa0bb9e7655 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -114,5 +114,6 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
int vf,
struct mlx5_hca_vport_context *req);
-
+int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
+int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 46b9ac5e8569..c1f6c95f3496 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1260,6 +1260,7 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ unsigned long *start, unsigned long *end,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ff151814a02d..3cadee0a3508 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -487,13 +487,15 @@ struct mm_struct {
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
#endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
* An operation with batched TLB flushing is going on. Anything that
* can move process memory needs to flush the TLB when moving a
* PROT_NONE or PROT_NUMA mapped page.
*/
- bool tlb_flush_pending;
+ atomic_t tlb_flush_pending;
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ /* See flush_tlb_batched_pending() */
+ bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
@@ -518,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return mm->cpu_vm_mask_var;
}
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end);
+
/*
* Memory barriers to keep this state in sync are graciously provided by
* the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
+ * The barriers are used to ensure the order between tlb_flush_pending updates,
+ * which happen while the lock is not taken, and the PTE updates, which happen
+ * while the lock is taken, are serialized.
*/
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
- barrier();
- return mm->tlb_flush_pending;
+ return atomic_read(&mm->tlb_flush_pending) > 0;
}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
+
+/*
+ * Returns true if more than one of the above TLB batching threads is running in parallel.
+ */
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
- mm->tlb_flush_pending = true;
+ return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_set(&mm->tlb_flush_pending, 0);
+}
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_inc(&mm->tlb_flush_pending);
/*
- * Guarantee that the tlb_flush_pending store does not leak into the
+ * Guarantee that the tlb_flush_pending increase does not leak into the
* critical section updating the page tables
*/
smp_mb__before_spinlock();
}
+
/* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
-{
- barrier();
- mm->tlb_flush_pending = false;
-}
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
- return false;
-}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
+ /*
+ * Guarantee that the tlb_flush_pending does not leak into the
+ * critical section, since we must order the PTE change and changes to
+ * the pending TLB flush indication. We could have relied on TLB flush
+ * as a memory barrier, but this behavior is not clearly documented.
+ */
+ smp_mb__before_atomic();
+ atomic_dec(&mm->tlb_flush_pending);
}
-#endif
struct vm_fault;
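Note: a minimal sketch of the counter discipline the new helpers establish (a hypothetical unmap path; the real callers live in mm/):

static void example_unmap_range(struct mm_struct *mm)
{
	inc_tlb_flush_pending(mm);	/* ordered before the PTE updates */

	/* ... clear PTEs under the page table lock ... */
	/* ... flush the TLB for the affected range ... */

	dec_tlb_flush_pending(mm);	/* ordered after the flush completes */
}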
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index c91b3bcd158f..7b2e31b1745a 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -95,17 +95,6 @@ struct mmu_notifier_ops {
pte_t pte);
/*
- * Before this is invoked any secondary MMU is still ok to
- * read/write to the page previously pointed to by the Linux
- * pte because the page hasn't been freed yet and it won't be
- * freed until this returns. If required set_page_dirty has to
- * be called internally to this method.
- */
- void (*invalidate_page)(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address);
-
- /*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_sem and/or the
* locks protecting the reverse maps are held. If the subsystem
@@ -220,8 +209,6 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
-extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
@@ -268,13 +255,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
__mmu_notifier_change_pte(mm, address, pte);
}
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address)
-{
- if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_page(mm, address);
-}
-
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
@@ -442,11 +422,6 @@ static inline void mmu_notifier_change_pte(struct mm_struct *mm,
{
}
-static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
- unsigned long address)
-{
-}
-
static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 892148c448cc..5216d2eb2289 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -681,10 +681,10 @@ struct nand_buffers {
* @tWW_min: WP# transition to WE# low
*/
struct nand_sdr_timings {
- u32 tBERS_max;
+ u64 tBERS_max;
u32 tCCS_min;
- u32 tPROG_max;
- u32 tR_max;
+ u64 tPROG_max;
+ u64 tR_max;
u32 tALH_min;
u32 tADL_min;
u32 tALS_min;
diff --git a/include/linux/net.h b/include/linux/net.h
index dda2cc939a53..ebeb48c92005 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -37,7 +37,7 @@ struct net;
/* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located
* in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
- * Eventually all flags will be in sk->sk_wq_flags.
+ * Eventually all flags will be in sk->sk_wq->flags.
*/
#define SOCKWQ_ASYNC_NOSPACE 0
#define SOCKWQ_ASYNC_WAITDATA 1
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 779b23595596..c99ba7914c0a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3866,6 +3866,8 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+
void *netdev_lower_get_next_private(struct net_device *dev,
struct list_head **iter);
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index ca3bcc4ed4e5..62cbcb842f99 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1235,7 +1235,7 @@ struct nfs41_state_protection {
struct nfs41_exchange_id_args {
struct nfs_client *client;
- nfs4_verifier *verifier;
+ nfs4_verifier verifier;
u32 flags;
struct nfs41_state_protection state_protect;
};
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 8aa01fd859fb..a36abe2da13e 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -168,6 +168,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
+
+#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
+ defined(CONFIG_HARDLOCKUP_DETECTOR)
+void watchdog_update_hrtimer_threshold(u64 period);
+#else
+static inline void watchdog_update_hrtimer_threshold(u64 period) { }
+#endif
+
extern bool is_hardlockup(void);
struct ctl_table;
extern int proc_watchdog(struct ctl_table *, int ,
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 6c8c5d8041b7..2591878c1d48 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -346,6 +346,11 @@ struct nvme_fc_remote_port {
* indicating an FC transport Aborted status.
* Entrypoint is Mandatory.
*
+ * @defer_rcv: Called by the transport to signal the LLDD that it has
+ * begun processing of a previously received NVME CMD IU. The LLDD
+ * is now free to re-use the rcv buffer associated with the
+ * nvmefc_tgt_fcp_req.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -846,6 +851,8 @@ struct nvmet_fc_target_template {
struct nvmefc_tgt_fcp_req *fcpreq);
void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
u32 max_hw_queues;
u16 max_sgl_segments;
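An LLDD that cannot immediately hand its receive buffer back to the hardware can now complete that handoff from @defer_rcv. A hedged sketch of how the new entrypoint might be wired up (the lldd_* names and the buffer-repost helper are placeholders, not part of the API):

static void lldd_defer_rcv(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* The transport owns the CMD IU now; the rcv buffer tied to
	 * fcpreq can be recycled to the hardware. */
	lldd_repost_rcv_buffer(fcpreq);	/* hypothetical helper */
}

static struct nvmet_fc_target_template lldd_tgt_template = {
	/* ... mandatory entrypoints elided ... */
	.defer_rcv	= lldd_defer_rcv,
};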
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index 21c37e39e41a..36cca93a5ff2 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -334,5 +334,24 @@ struct fcnvme_ls_disconnect_acc {
#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
+/*
+ * A TRADDR string must be of the form "nn-<16hexdigits>:pn-<16hexdigits>".
+ * The string may be specified with or without a "0x" prefix
+ * in front of the <16hexdigits>. Without is considered the "min" string
+ * and with is considered the "max" string. The hexdigits may be upper
+ * or lower case.
+ */
+#define NVME_FC_TRADDR_NNLEN 3 /* "?n-" */
+#define NVME_FC_TRADDR_OXNNLEN 5 /* "?n-0x" */
+#define NVME_FC_TRADDR_HEXNAMELEN 16
+#define NVME_FC_TRADDR_MINLENGTH \
+ (2 * (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1)
+#define NVME_FC_TRADDR_MAXLENGTH \
+ (2 * (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN) + 1)
+#define NVME_FC_TRADDR_MIN_PN_OFFSET \
+ (NVME_FC_TRADDR_NNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1)
+#define NVME_FC_TRADDR_MAX_PN_OFFSET \
+ (NVME_FC_TRADDR_OXNNLEN + NVME_FC_TRADDR_HEXNAMELEN + 1)
+
#endif /* _NVME_FC_H */
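To make the constants concrete: in the minimal form each name is "?n-" plus 16 hex digits and the two names are joined by a single ':', giving 39 characters; the "0x" form adds two characters per name, giving 43. A small, illustrative length check (not taken from the kernel source):

/* "nn-0123456789abcdef:pn-0123456789abcdef"     -> 39 = MINLENGTH */
/* "nn-0x0123456789abcdef:pn-0x0123456789abcdef" -> 43 = MAXLENGTH */
static bool nvme_fc_traddr_len_ok(size_t blen)
{
	return blen == NVME_FC_TRADDR_MINLENGTH ||
	       blen == NVME_FC_TRADDR_MAXLENGTH;
}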
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index bc74da018bdc..8efff888bd9b 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -254,7 +254,7 @@ enum {
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
- NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7,
+ NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
};
struct nvme_lbaf {
@@ -1006,7 +1006,7 @@ static inline bool nvme_is_write(struct nvme_command *cmd)
* Why can't we simply have a Fabrics In and Fabrics out command?
*/
if (unlikely(cmd->common.opcode == nvme_fabrics_command))
- return cmd->fabrics.opcode & 1;
+ return cmd->fabrics.fctype & 1;
return cmd->common.opcode & 1;
}
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 8a266e2be5a6..76aac4ce39bc 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -6,6 +6,8 @@
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
+#include <linux/sched/coredump.h> /* MMF_* */
+#include <linux/mm.h> /* VM_FAULT* */
struct zonelist;
struct notifier_block;
@@ -63,6 +65,26 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
return tsk->signal->oom_mm;
}
+/*
+ * Checks whether a page fault on the given mm is still reliable.
+ * This is no longer true if the oom reaper started to reap the
+ * address space, which is reflected by the MMF_UNSTABLE flag set in
+ * the mm. At that moment any !shared mapping would lose its content
+ * and could cause memory corruption (zero pages instead of the
+ * original content).
+ *
+ * Users should call this before establishing a page table entry for
+ * a !shared mapping and under the proper page table lock.
+ *
+ * Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
+ */
+static inline int check_stable_address_space(struct mm_struct *mm)
+{
+ if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
+ return VM_FAULT_SIGBUS;
+ return 0;
+}
+
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
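In a fault path the check is meant to sit right before the new PTE is installed, with the page table lock held; a minimal sketch, assuming a hypothetical anonymous-fault helper:

static int set_anon_pte(struct vm_fault *vmf, struct page *page)
{
	int ret;

	/* Page table lock is held; the oom reaper may have run by now. */
	ret = check_stable_address_space(vmf->vma->vm_mm);
	if (ret)
		return ret;	/* VM_FAULT_SIGBUS, don't map zeroed data */

	/* ... set_pte_at() for the freshly prepared page ... */
	return 0;
}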
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index baa9344dcd10..79b36f57c3ba 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -163,8 +163,6 @@ void release_pages(struct page **pages, int nr, bool cold);
*/
static inline int page_cache_get_speculative(struct page *page)
{
- VM_BUG_ON(in_interrupt());
-
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic() && !irqs_disabled());
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4869e66dd659..da05e5db06ac 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -188,6 +188,8 @@ enum pci_dev_flags {
* the direct_complete optimization.
*/
PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11),
+ /* Don't use Relaxed Ordering for TLPs directed at this device */
+ PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 12),
};
enum pci_irq_reroute_variant {
@@ -729,6 +731,7 @@ struct pci_driver {
void (*shutdown) (struct pci_dev *dev);
int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
const struct pci_error_handlers *err_handler;
+ const struct attribute_group **groups;
struct device_driver driver;
struct pci_dynids dynids;
};
@@ -1067,6 +1070,7 @@ void pcie_flr(struct pci_dev *dev);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
+int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
int pci_reset_slot(struct pci_slot *slot);
@@ -1125,6 +1129,7 @@ bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
+bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
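A driver that tags its DMA writes with Relaxed Ordering is expected to consult the new helper first; a hedged sketch (struct my_dev and its flag are placeholders):

static void my_dev_dma_setup(struct pci_dev *pdev, struct my_dev *dev)
{
	/* Returns false when RO TLPs must not be used for this device,
	 * e.g. after the PCI_DEV_FLAGS_NO_RELAXED_ORDERING quirk was
	 * applied somewhere on the path to the root complex. */
	dev->use_relaxed_ordering = pcie_relaxed_ordering_enabled(pdev);
}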
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 1360dd6d5e61..af0f44effd44 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -24,10 +24,14 @@
* interrupt and passed the address of the low level handler,
* and can be used to implement any platform specific handling
* before or after calling it.
+ *
+ * @irq_flags: if non-zero, these flags will be passed to request_irq
+ * when requesting interrupts for this PMU device.
*/
struct arm_pmu_platdata {
irqreturn_t (*handle_irq)(int irq, void *dev,
irq_handler_t pmu_handler);
+ unsigned long irq_flags;
};
#ifdef CONFIG_ARM_PMU
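Board files supplying this platdata can now override the default request_irq() flags; for example (hypothetical platform data; the flags are the usual <linux/interrupt.h> values):

static struct arm_pmu_platdata board_pmu_platdata = {
	/* Keep the PMU IRQ unthreaded and pinned, per platform need. */
	.irq_flags = IRQF_NOBALANCING | IRQF_NO_THREAD,
};

With irq_flags left at zero the PMU core keeps its default flags, so existing platdata users are unaffected.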
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a3b873fc59e4..718ba163c1b9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -147,9 +147,6 @@ struct hw_perf_event {
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
};
- struct { /* itrace */
- int itrace_started;
- };
struct { /* amd_power */
u64 pwr_acc;
u64 ptsc;
@@ -310,8 +307,8 @@ struct pmu {
* Notification that the event was mapped or unmapped. Called
* in the context of the mapping task.
*/
- void (*event_mapped) (struct perf_event *event); /*optional*/
- void (*event_unmapped) (struct perf_event *event); /*optional*/
+ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
+ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */
/*
* Flags for ->add()/->del()/ ->start()/->stop(). There are
@@ -541,6 +538,7 @@ struct swevent_hlist {
#define PERF_ATTACH_GROUP 0x02
#define PERF_ATTACH_TASK 0x04
#define PERF_ATTACH_TASK_DATA 0x08
+#define PERF_ATTACH_ITRACE 0x10
struct perf_cgroup;
struct ring_buffer;
@@ -864,6 +862,7 @@ extern int perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
+extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
@@ -944,6 +943,8 @@ struct perf_sample_data {
struct perf_regs regs_intr;
u64 stack_user_size;
+
+ u64 phys_addr;
} ____cacheline_aligned;
/* default value for data source */
@@ -1201,7 +1202,7 @@ extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs,
struct hlist_head *head, int rctx,
- struct task_struct *task);
+ struct task_struct *task, struct perf_event *event);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2a9567bb8186..0bb5b212ab42 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -830,7 +830,7 @@ static inline int phy_read_status(struct phy_device *phydev)
dev_err(&_phydev->mdio.dev, format, ##args)
#define phydev_dbg(_phydev, format, args...) \
- dev_dbg(&_phydev->mdio.dev, format, ##args);
+ dev_dbg(&_phydev->mdio.dev, format, ##args)
static inline const char *phydev_name(const struct phy_device *phydev)
{
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 4d179316e431..719582744a2e 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -8,7 +8,9 @@ enum pid_type
PIDTYPE_PID,
PIDTYPE_PGID,
PIDTYPE_SID,
- PIDTYPE_MAX
+ PIDTYPE_MAX,
+ /* only valid for __task_pid_nr_ns() */
+ __PIDTYPE_TGID
};
/*
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 231d3075815a..e91d1b6a260d 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -81,8 +81,8 @@
* it.
* @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
* value on the line. Use argument 1 to indicate high level, argument 0 to
- * indicate low level. (Please see Documentation/pinctrl.txt, section
- * "GPIO mode pitfalls" for a discussion around this parameter.)
+ * indicate low level. (Please see Documentation/driver-api/pinctl.rst,
+ * section "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
* supplies, the argument to this parameter (on a custom format) tells
* the driver which alternative power source to use.
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index 8e981be2e2c2..0ff1e0dba720 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -55,9 +55,6 @@ struct omap_hsmmc_platform_data {
u32 caps; /* Used for the MMC driver on 2430 and later */
u32 pm_caps; /* PM capabilities of the mmc */
- /* use the internal clock */
- unsigned internal_clock:1;
-
/* nonremovable e.g. eMMC */
unsigned nonremovable:1;
@@ -73,13 +70,6 @@ struct omap_hsmmc_platform_data {
int gpio_cd; /* gpio (card detect) */
int gpio_cod; /* gpio (cover detect) */
int gpio_wp; /* gpio (write protect) */
-
- int (*set_power)(struct device *dev, int power_on, int vdd);
- void (*remux)(struct device *dev, int power_on);
- /* Call back before enabling / disabling regulators */
- void (*before_set_reg)(struct device *dev, int power_on, int vdd);
- /* Call back after enabling / disabling regulators */
- void (*after_set_reg)(struct device *dev, int power_on, int vdd);
/* if we have special card, init it using this callback */
void (*init_card)(struct mmc_card *card);
diff --git a/include/linux/platform_data/omap_drm.h b/include/linux/platform_data/omap_drm.h
deleted file mode 100644
index f4e4a237ebd2..000000000000
--- a/include/linux/platform_data/omap_drm.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * DRM/KMS platform data for TI OMAP platforms
- *
- * Copyright (C) 2012 Texas Instruments
- * Author: Rob Clark <rob.clark@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __PLATFORM_DATA_OMAP_DRM_H__
-#define __PLATFORM_DATA_OMAP_DRM_H__
-
-/*
- * Optional platform data to configure the default configuration of which
- * pipes/overlays/CRTCs are used.. if this is not provided, then instead the
- * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
- * one manager, with priority given to managers that are connected to
- * detected devices. Remaining overlays are used as video planes. This
- * should be a good default behavior for most cases, but yet there still
- * might be times when you wish to do something different.
- */
-struct omap_kms_platform_data {
- /* overlays to use as CRTCs: */
- int ovl_cnt;
- const int *ovl_ids;
-
- /* overlays to use as video planes: */
- int pln_cnt;
- const int *pln_ids;
-
- int mgr_cnt;
- const int *mgr_ids;
-
- int dev_cnt;
- const char **dev_names;
-};
-
-struct omap_drm_platform_data {
- uint32_t omaprev;
- struct omap_kms_platform_data *kms_pdata;
-};
-
-#endif /* __PLATFORM_DATA_OMAP_DRM_H__ */
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 79b0e4cdb814..f8274b0c6888 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -17,10 +17,12 @@
* Available only for accelerometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
* @open_drain: set the interrupt line to be open drain if possible.
+ * @spi_3wire: enable spi-3wire mode.
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
bool open_drain;
+ bool spi_3wire;
};
#endif /* ST_SENSORS_PDATA_H */
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index a026bfd089db..51349d124ee5 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -99,6 +99,11 @@ struct system_device_crosststamp;
* parameter func: the desired function to use.
* parameter chan: the function channel index to use.
*
+ * @do_aux_work: Request driver to perform auxiliary (periodic) operations.
+ * The driver should return the delay of the next auxiliary work
+ * scheduling time (>=0) or a negative value if further scheduling
+ * is not required.
+ *
* Drivers should embed their ptp_clock_info within a private
* structure, obtaining a reference to it using container_of().
*
@@ -126,6 +131,7 @@ struct ptp_clock_info {
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
+ long (*do_aux_work)(struct ptp_clock_info *ptp);
};
struct ptp_clock;
@@ -211,6 +217,16 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan);
+/**
+ * ptp_schedule_worker() - schedule ptp auxiliary work
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ * @delay: number of jiffies to wait before queuing.
+ * See kthread_queue_delayed_work() for more info.
+ */
+int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
+
#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -225,6 +241,10 @@ static inline int ptp_clock_index(struct ptp_clock *ptp)
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
+static inline int ptp_schedule_worker(struct ptp_clock *ptp,
+ unsigned long delay)
+{ return -EOPNOTSUPP; }
+
#endif
#endif
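Putting the two pieces together: the driver implements do_aux_work on its ptp_clock_info and then primes the worker once after registration. A minimal sketch, assuming a hypothetical my_phc wrapper around the clock:

static long my_phc_do_aux_work(struct ptp_clock_info *ptp)
{
	struct my_phc *phc = container_of(ptp, struct my_phc, caps);

	my_phc_poll_timestamps(phc);	/* hypothetical periodic duty */
	return HZ;			/* re-run in 1s; negative stops */
}

static void my_phc_start(struct my_phc *phc)
{
	/* phc->caps.do_aux_work was set before ptp_clock_register() */
	ptp_schedule_worker(phc->clock, 0);	/* queue the first run */
}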
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index d8c97ec8a8e6..37b4bb2545b3 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -436,9 +436,9 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
__PTR_RING_PEEK_CALL_v; \
})
-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
+static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
- return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
+ return kcalloc(size, sizeof(void *), gfp);
}
static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@@ -582,7 +582,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
* In particular if you consume ring in interrupt or BH context, you must
* disable interrupts/BH when doing so.
*/
-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
+static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
+ unsigned int nrings,
int size,
gfp_t gfp, void (*destroy)(void *))
{
@@ -590,7 +591,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
void ***queues;
int i;
- queues = kmalloc(nrings * sizeof *queues, gfp);
+ queues = kmalloc_array(nrings, sizeof(*queues), gfp);
if (!queues)
goto noqueues;
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index f816fc72b51e..96f1baf62ab8 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -58,8 +58,6 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
void synchronize_sched(void);
-void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
-void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
#ifdef CONFIG_PREEMPT_RCU
@@ -105,11 +103,13 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
void rcu_init(void);
+extern int rcu_scheduler_active __read_mostly;
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);
+void rcutree_migrate_callbacks(int cpu);
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
@@ -164,8 +164,6 @@ static inline void rcu_init_nohz(void) { }
* macro rather than an inline function to avoid #include hell.
*/
#ifdef CONFIG_TASKS_RCU
-#define TASKS_RCU(x) x
-extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch_lite(t) \
do { \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
@@ -176,10 +174,17 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
rcu_all_qs(); \
rcu_note_voluntary_context_switch_lite(t); \
} while (0)
+void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
+void synchronize_rcu_tasks(void);
+void exit_tasks_rcu_start(void);
+void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
-#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
+#define call_rcu_tasks call_rcu_sched
+#define synchronize_rcu_tasks synchronize_sched
+static inline void exit_tasks_rcu_start(void) { }
+static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
/**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 5becbbccb998..b3dbf9502fd0 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -116,13 +116,11 @@ static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
-
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
-extern int rcu_scheduler_active __read_mostly;
+#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
-#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
+#else /* #ifndef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
-#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
+#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_is_watching(void) { return true; }
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 156cfd330b66..21fc84d82d41 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -254,6 +254,9 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
unsigned *pshared_count,
struct dma_fence ***pshared);
+int reservation_object_copy_fences(struct reservation_object *dst,
+ struct reservation_object *src);
+
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8337e2db0bb2..b57c7bedb534 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -589,9 +589,10 @@ struct task_struct {
#ifdef CONFIG_TASKS_RCU
unsigned long rcu_tasks_nvcsw;
- bool rcu_tasks_holdout;
- struct list_head rcu_tasks_holdout_list;
+ u8 rcu_tasks_holdout;
+ u8 rcu_tasks_idx;
int rcu_tasks_idle_cpu;
+ struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */
struct sched_info sched_info;
@@ -1163,13 +1164,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
return tsk->tgid;
}
-extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
-{
- return pid_vnr(task_tgid(tsk));
-}
-
/**
* pid_alive - check that a task structure is not stale
* @p: Task structure to be checked.
@@ -1185,23 +1179,6 @@ static inline int pid_alive(const struct task_struct *p)
return p->pids[PIDTYPE_PID].pid != NULL;
}
-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
- pid_t pid = 0;
-
- rcu_read_lock();
- if (pid_alive(tsk))
- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
- rcu_read_unlock();
-
- return pid;
-}
-
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
-{
- return task_ppid_nr_ns(tsk, &init_pid_ns);
-}
-
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
@@ -1223,6 +1200,33 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
+}
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
+}
+
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+ pid_t pid = 0;
+
+ rcu_read_lock();
+ if (pid_alive(tsk))
+ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+ rcu_read_unlock();
+
+ return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+ return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 35226cd4efb0..8621ffdeecbf 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -193,7 +193,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
}
static inline int skb_array_resize_multiple(struct skb_array **rings,
- int nrings, int size, gfp_t gfp)
+ int nrings, unsigned int size,
+ gfp_t gfp)
{
BUILD_BUG_ON(offsetof(struct skb_array, ring));
return ptr_ring_resize_multiple((struct ptr_ring **)rings,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dbe29b6c9bd6..d67a8182e5eb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -973,7 +973,23 @@ int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
-int skb_pad(struct sk_buff *skb, int pad);
+int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
+
+/**
+ * skb_pad - zero pad the tail of an skb
+ * @skb: buffer to pad
+ * @pad: space to pad
+ *
+ * Ensure that a buffer is followed by a padding area that is zero
+ * filled. Used by network drivers which may DMA or transfer data
+ * beyond the buffer end onto the wire.
+ *
+ * May return an error in out-of-memory cases. The skb is freed on error.
+ */
+static inline int skb_pad(struct sk_buff *skb, int pad)
+{
+ return __skb_pad(skb, pad, true);
+}
#define dev_kfree_skb(a) consume_skb(a)
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
@@ -2825,25 +2841,42 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
* skb_put_padto - increase size and pad an skbuff up to a minimal size
* @skb: buffer to pad
* @len: minimal length
+ * @free_on_error: free buffer on error
*
* Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it
* is untouched. Otherwise it is extended. Returns zero on
- * success. The skb is freed on error.
+ * success. The skb is freed on error if @free_on_error is true.
*/
-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
+ bool free_on_error)
{
unsigned int size = skb->len;
if (unlikely(size < len)) {
len -= size;
- if (skb_pad(skb, len))
+ if (__skb_pad(skb, len, free_on_error))
return -ENOMEM;
__skb_put(skb, len);
}
return 0;
}
+/**
+ * skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */
+static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+ return __skb_put_padto(skb, len, true);
+}
+
static inline int skb_add_data(struct sk_buff *skb,
struct iov_iter *from, int copy)
{
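The free_on_error split matters to callers that still own the skb on failure, e.g. a driver that wants to leave a frame on its queue and retry rather than drop it. A hedged sketch using the non-freeing variant:

/* Pad to the Ethernet minimum but keep the skb alive on failure. */
static int my_xmit_pad(struct sk_buff *skb)
{
	if (__skb_put_padto(skb, ETH_ZLEN, false))
		return -ENOMEM;	/* caller still owns skb and may requeue */
	return 0;
}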
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d9510e8522d4..ef018a6e4985 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,12 +130,6 @@ do { \
#define smp_mb__before_spinlock() smp_wmb()
#endif
-/**
- * raw_spin_unlock_wait - wait until the spinlock gets unlocked
- * @lock: the spinlock in question.
- */
-#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
-
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
@@ -369,31 +363,6 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
-/**
- * spin_unlock_wait - Interpose between successive critical sections
- * @lock: the spinlock whose critical sections are to be interposed.
- *
- * Semantically this is equivalent to a spin_lock() immediately
- * followed by a spin_unlock(). However, most architectures have
- * more efficient implementations in which the spin_unlock_wait()
- * cannot block concurrent lock acquisition, and in some cases
- * where spin_unlock_wait() does not write to the lock variable.
- * Nevertheless, spin_unlock_wait() can have high overhead, so if
- * you feel the need to use it, please check to see if there is
- * a better way to get your job done.
- *
- * The ordering guarantees provided by spin_unlock_wait() are:
- *
- * 1. All accesses preceding the spin_unlock_wait() happen before
- * any accesses in later critical sections for this same lock.
- * 2. All accesses following the spin_unlock_wait() happen after
- * any accesses in earlier critical sections for this same lock.
- */
-static __always_inline void spin_unlock_wait(spinlock_t *lock)
-{
- raw_spin_unlock_wait(&lock->rlock);
-}
-
static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0d9848de677d..612fb530af41 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -26,11 +26,6 @@
#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x) ((x)->slock == 0)
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->slock, VAL);
-}
-
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
lock->slock = 0;
@@ -73,7 +68,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
-#define arch_spin_unlock_wait(lock) do { barrier(); (void)(lock); } while (0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index cfbfc540cafc..261471f407a5 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -87,4 +87,17 @@ static inline void srcu_barrier(struct srcu_struct *sp)
synchronize_srcu(sp);
}
+/* Defined here to avoid size increase for non-torture kernels. */
+static inline void srcu_torture_stats_print(struct srcu_struct *sp,
+ char *tt, char *tf)
+{
+ int idx;
+
+ idx = READ_ONCE(sp->srcu_idx) & 0x1;
+ pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
+ tt, tf, idx,
+ READ_ONCE(sp->srcu_lock_nesting[!idx]),
+ READ_ONCE(sp->srcu_lock_nesting[idx]));
+}
+
#endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 42973f787e7e..a949f4f9e4d7 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -104,8 +104,6 @@ struct srcu_struct {
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
-void process_srcu(struct work_struct *work);
-
#define __SRCU_STRUCT_INIT(name) \
{ \
.sda = &name##_srcu_data, \
@@ -141,5 +139,6 @@ void process_srcu(struct work_struct *work);
void synchronize_srcu_expedited(struct srcu_struct *sp);
void srcu_barrier(struct srcu_struct *sp);
+void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf);
#endif
diff --git a/include/linux/swait.h b/include/linux/swait.h
index c1f9c62a8a50..4a4e180d0a35 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -169,4 +169,59 @@ do { \
__ret; \
})
+#define __swait_event_idle(wq, condition) \
+ (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
+
+/**
+ * swait_event_idle - wait without system load contribution
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
+ * true. The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * This function is mostly used when a kthread or workqueue waits for some
+ * condition and doesn't want to contribute to system load. Signals are
+ * ignored.
+ */
+#define swait_event_idle(wq, condition) \
+do { \
+ if (condition) \
+ break; \
+ __swait_event_idle(wq, condition); \
+} while (0)
+
+#define __swait_event_idle_timeout(wq, condition, timeout) \
+ ___swait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_IDLE, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * swait_event_idle_timeout - wait up to timeout without load contribution
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout at which we'll give up in jiffies
+ *
+ * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
+ * true. The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * This function is mostly used when a kthread or workqueue waits for some
+ * condition and doesn't want to contribute to system load. Signals are
+ * ignored.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
+ */
+#define swait_event_idle_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __swait_event_idle_timeout(wq, \
+ condition, timeout); \
+ __ret; \
+})
+
#endif /* _LINUX_SWAIT_H */
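The intended consumer is a housekeeping kthread that should not inflate the load average while it idles. A minimal sketch, assuming hypothetical work_ready()/do_work() helpers:

static DECLARE_SWAIT_QUEUE_HEAD(my_swq);

static int my_kthread_fn(void *arg)
{
	while (!kthread_should_stop()) {
		/* TASK_IDLE sleep: no loadavg contribution, signals
		 * ignored; wakes early if my_swq is woken and the
		 * condition holds, else after one second. */
		swait_event_idle_timeout(my_swq, work_ready(), HZ);
		if (work_ready())
			do_work();
	}
	return 0;
}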
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index 5726107963b2..0ad87c434ae6 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -43,12 +43,13 @@ struct sync_file {
#endif
wait_queue_head_t wq;
+ unsigned long flags;
struct dma_fence *fence;
struct dma_fence_cb cb;
};
-#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
+#define POLL_ENABLED 0
struct sync_file *sync_file_create(struct dma_fence *fence);
struct dma_fence *sync_file_get_fence(int fd);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 3cb15ea48aee..138c94535864 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -100,11 +100,12 @@ union bpf_attr;
#define __MAP(n,...) __MAP##n(__VA_ARGS__)
#define __SC_DECL(t, a) t a
-#define __TYPE_IS_L(t) (__same_type((t)0, 0L))
-#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
-#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
+#define __TYPE_AS(t, v) __same_type((__force t)0, v)
+#define __TYPE_IS_L(t) (__TYPE_AS(t, 0L))
+#define __TYPE_IS_UL(t) (__TYPE_AS(t, 0UL))
+#define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
-#define __SC_CAST(t, a) (t) a
+#define __SC_CAST(t, a) (__force t) a
#define __SC_ARGS(t, a) a
#define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
@@ -578,12 +579,12 @@ asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_preadv2(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
- int flags);
+ rwf_t flags);
asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
asmlinkage long sys_pwritev2(unsigned long fd, const struct iovec __user *vec,
unsigned long vlen, unsigned long pos_l, unsigned long pos_h,
- int flags);
+ rwf_t flags);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
asmlinkage long sys_chdir(const char __user *filename);
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 536c80ff7ad9..5012b524283d 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -508,9 +508,9 @@ void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
u64 count, struct pt_regs *regs, void *head,
- struct task_struct *task)
+ struct task_struct *task, struct perf_event *event)
{
- perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
+ perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
}
#endif
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index acdd6f915a8d..20ef8e6ec2db 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -156,7 +156,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
-copy_in_user(void __user *to, const void *from, unsigned long n)
+copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
might_fault();
if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 2251e1925ea4..33b0bdbb613c 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -84,26 +84,12 @@ int guid_parse(const char *uuid, guid_t *u);
int uuid_parse(const char *uuid, uuid_t *u);
/* backwards compatibility, don't use in new code */
-typedef uuid_t uuid_be;
-#define UUID_BE(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
- UUID_INIT(a, _b, c, d0, d1, d2, d3, d4, d5, d6, d7)
-#define NULL_UUID_BE \
- UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \
- 0x00, 0x00, 0x00, 0x00)
-
#define uuid_le_gen(u) guid_gen(u)
-#define uuid_be_gen(u) uuid_gen(u)
#define uuid_le_to_bin(guid, u) guid_parse(guid, u)
-#define uuid_be_to_bin(uuid, u) uuid_parse(uuid, u)
static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
{
return memcmp(&u1, &u2, sizeof(guid_t));
}
-static inline int uuid_be_cmp(const uuid_t u1, const uuid_t u2)
-{
- return memcmp(&u1, &u2, sizeof(uuid_t));
-}
-
#endif
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 586809abb273..a47b985341d1 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -152,7 +152,7 @@ extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
size_t *data_size);
struct pci_dev;
-#ifdef CONFIG_EEH
+#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH)
extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
@@ -173,7 +173,7 @@ static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
{
return -ENOTTY;
}
-#endif /* CONFIG_EEH */
+#endif /* CONFIG_VFIO_SPAPR_EEH */
/*
* IRQfd - generic
diff --git a/include/linux/wait.h b/include/linux/wait.h
index b289c96151ee..dc19880c02f5 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -529,13 +529,13 @@ do { \
/**
* wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
- * @wq_head: the waitqueue to wait on
+ * @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, as a ktime_t
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq_head is woken up.
+ * The @condition is checked each time the waitqueue @wq is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
@@ -735,12 +735,12 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
/**
* wait_event_killable - sleep until a condition gets true
- * @wq: the waitqueue to wait on
+ * @wq_head: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_KILLABLE) until the
* @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
@@ -757,6 +757,43 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
__ret; \
})
+#define __wait_event_killable_timeout(wq_head, condition, timeout) \
+ ___wait_event(wq_head, ___wait_cond_timeout(condition), \
+ TASK_KILLABLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a kill signal is received.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
+ * interrupted by a kill signal.
+ *
+ * Only kill signals interrupt this process.
+ */
+#define wait_event_killable_timeout(wq_head, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_killable_timeout(wq_head, \
+ condition, timeout); \
+ __ret; \
+})
+
#define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
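Usage follows wait_event_interruptible_timeout(), except that only fatal signals end the sleep early; a sketch under assumed names (struct my_fw, FW_READY):

static int fw_wait_ready(struct my_fw *fw)
{
	long ret;

	/* Sleep up to 5s; only a fatal signal interrupts. */
	ret = wait_event_killable_timeout(fw->wq, fw->state == FW_READY,
					  5 * HZ);
	if (ret == -ERESTARTSYS)
		return ret;		/* killed */
	if (!ret)
		return -ETIMEDOUT;	/* timed out, condition still false */
	return 0;			/* condition became true */
}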
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index c102ef65cb64..db6dc9dc0482 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -323,6 +323,7 @@ enum {
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
 __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
+ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
@@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue(fmt, flags, args...) \
- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
+ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
#define create_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))