Diffstat (limited to 'include/linux')
-rw-r--r-- include/linux/acpi.h | 27
-rw-r--r-- include/linux/bio.h | 10
-rw-r--r-- include/linux/bitmap.h | 18
-rw-r--r-- include/linux/blkdev.h | 6
-rw-r--r-- include/linux/bvec.h | 3
-rw-r--r-- include/linux/can/dev.h | 3
-rw-r--r-- include/linux/cec-funcs.h | 78
-rw-r--r-- include/linux/cec.h | 5
-rw-r--r-- include/linux/compiler-gcc.h | 10
-rw-r--r-- include/linux/compiler.h | 7
-rw-r--r-- include/linux/cpu.h | 6
-rw-r--r-- include/linux/cpuhotplug.h | 4
-rw-r--r-- include/linux/devfreq-event.h | 5
-rw-r--r-- include/linux/dma-mapping.h | 2
-rw-r--r-- include/linux/efi.h | 74
-rw-r--r-- include/linux/fence.h | 2
-rw-r--r-- include/linux/fs.h | 1
-rw-r--r-- include/linux/fscrypto.h | 5
-rw-r--r-- include/linux/fsnotify_backend.h | 6
-rw-r--r-- include/linux/ftrace.h | 17
-rw-r--r-- include/linux/host1x.h | 2
-rw-r--r-- include/linux/hypervisor.h | 17
-rw-r--r-- include/linux/iio/sw_trigger.h | 2
-rw-r--r-- include/linux/init_task.h | 11
-rw-r--r-- include/linux/iomap.h | 8
-rw-r--r-- include/linux/irq.h | 10
-rw-r--r-- include/linux/irqchip/arm-gic-v3.h | 1
-rw-r--r-- include/linux/jump_label.h | 18
-rw-r--r-- include/linux/kernel.h | 9
-rw-r--r-- include/linux/kvm_host.h | 12
-rw-r--r-- include/linux/lglock.h | 81
-rw-r--r-- include/linux/list.h | 7
-rw-r--r-- include/linux/mempolicy.h | 4
-rw-r--r-- include/linux/mfd/da8xx-cfgchip.h | 153
-rw-r--r-- include/linux/mfd/ti_am335x_tscadc.h | 8
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 5
-rw-r--r-- include/linux/mm.h | 3
-rw-r--r-- include/linux/mmzone.h | 18
-rw-r--r-- include/linux/mroute.h | 2
-rw-r--r-- include/linux/mroute6.h | 2
-rw-r--r-- include/linux/msi.h | 2
-rw-r--r-- include/linux/netdevice.h | 4
-rw-r--r-- include/linux/netfilter/nfnetlink_acct.h | 4
-rw-r--r-- include/linux/nvme.h | 2
-rw-r--r-- include/linux/pagemap.h | 39
-rw-r--r-- include/linux/pci.h | 23
-rw-r--r-- include/linux/percpu-rwsem.h | 108
-rw-r--r-- include/linux/perf/arm_pmu.h | 13
-rw-r--r-- include/linux/perf_event.h | 28
-rw-r--r-- include/linux/pm_domain.h | 74
-rw-r--r-- include/linux/printk.h | 48
-rw-r--r-- include/linux/property.h | 2
-rw-r--r-- include/linux/qed/qed_if.h | 8
-rw-r--r-- include/linux/rcu_sync.h | 1
-rw-r--r-- include/linux/rcupdate.h | 1
-rw-r--r-- include/linux/sched.h | 124
-rw-r--r-- include/linux/sctp.h | 64
-rw-r--r-- include/linux/serial_8250.h | 1
-rw-r--r-- include/linux/skbuff.h | 52
-rw-r--r-- include/linux/slab.h | 12
-rw-r--r-- include/linux/smc91x.h | 10
-rw-r--r-- include/linux/smp.h | 3
-rw-r--r-- include/linux/sunrpc/clnt.h | 2
-rw-r--r-- include/linux/sunrpc/xprt.h | 3
-rw-r--r-- include/linux/suspend.h | 2
-rw-r--r-- include/linux/swap.h | 2
-rw-r--r-- include/linux/sysctl.h | 2
-rw-r--r-- include/linux/thread_info.h | 40
-rw-r--r-- include/linux/torture.h | 2
-rw-r--r-- include/linux/u64_stats_sync.h | 45
-rw-r--r-- include/linux/uaccess.h | 4
-rw-r--r-- include/linux/uio.h | 2
-rw-r--r-- include/linux/wait.h | 17
73 files changed, 1000 insertions, 406 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4d8452c2384b..19e650c940b6 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -85,6 +85,8 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
return dev_name(&adev->dev);
}
+struct device *acpi_get_first_physical_node(struct acpi_device *adev);
+
enum acpi_irq_model_id {
ACPI_IRQ_MODEL_PIC = 0,
ACPI_IRQ_MODEL_IOAPIC,
@@ -267,12 +269,18 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
return phys_id == PHYS_CPUID_INVALID;
}
+/* Validate the processor object's proc_id */
+bool acpi_processor_validate_proc_id(int proc_id);
+
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
int acpi_unmap_cpu(int cpu);
+int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+void acpi_set_processor_mapping(void);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
@@ -634,6 +642,11 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
return NULL;
}
+static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline void acpi_early_init(void) { }
static inline void acpi_subsystem_init(void) { }
@@ -751,6 +764,12 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
#endif /* !CONFIG_ACPI */
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_ioapic_add(acpi_handle root);
+#else
+static inline int acpi_ioapic_add(acpi_handle root) { return 0; }
+#endif
+
#ifdef CONFIG_ACPI
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
u32 pm1a_ctrl, u32 pm1b_ctrl));
@@ -1056,7 +1075,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
return NULL;
}
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
static const void * __acpi_table_##name[] \
__attribute__((unused)) \
= { (void *) table_id, \
@@ -1074,4 +1093,10 @@ void acpi_table_upgrade(void);
static inline void acpi_table_upgrade(void) { }
#endif
+#if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG)
+extern bool acpi_has_watchdog(void);
+#else
+static inline bool acpi_has_watchdog(void) { return false; }
+#endif
+
#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 59ffaa68b11b..23ddf4b46a9b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -71,7 +71,8 @@ static inline bool bio_has_data(struct bio *bio)
{
if (bio &&
bio->bi_iter.bi_size &&
- bio_op(bio) != REQ_OP_DISCARD)
+ bio_op(bio) != REQ_OP_DISCARD &&
+ bio_op(bio) != REQ_OP_SECURE_ERASE)
return true;
return false;
@@ -79,7 +80,9 @@ static inline bool bio_has_data(struct bio *bio)
static inline bool bio_no_advance_iter(struct bio *bio)
{
- return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_WRITE_SAME;
+ return bio_op(bio) == REQ_OP_DISCARD ||
+ bio_op(bio) == REQ_OP_SECURE_ERASE ||
+ bio_op(bio) == REQ_OP_WRITE_SAME;
}
static inline bool bio_is_rw(struct bio *bio)
@@ -199,6 +202,9 @@ static inline unsigned bio_segments(struct bio *bio)
if (bio_op(bio) == REQ_OP_DISCARD)
return 1;
+ if (bio_op(bio) == REQ_OP_SECURE_ERASE)
+ return 1;
+
if (bio_op(bio) == REQ_OP_WRITE_SAME)
return 1;
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 598bc999f4c2..3b77588a9360 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -339,6 +339,24 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
+/*
+ * bitmap_from_u64 - Check and swap words within u64.
+ * @mask: source bitmap
+ * @dst: destination bitmap
+ *
+ * On a 32-bit big-endian kernel, reading a u64 mask through
+ * ((u32 *)&val)[*] returns the wrong word: ((u32 *)&val)[0]
+ * holds the upper 32 bits, while the bitmap expects the lower
+ * 32 bits of the u64 first.
+ */
+static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+{
+ dst[0] = mask & ULONG_MAX;
+
+ if (sizeof(mask) > sizeof(unsigned long))
+ dst[1] = mask >> 32;
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
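The helper above exists because on a 32-bit big-endian kernel the most significant
word of a u64 comes first in memory, so a plain cast to unsigned long[] does not
yield a usable bitmap. A stand-alone sketch of the same logic in portable C
(demo only, not kernel code):

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    /* Same word-placement logic as bitmap_from_u64(). */
    static void bitmap_from_u64_demo(unsigned long *dst, uint64_t mask)
    {
        dst[0] = mask & ULONG_MAX;          /* low bits always land in word 0 */
        if (sizeof(mask) > sizeof(unsigned long))
            dst[1] = mask >> 32;            /* high word needed only on 32-bit */
    }

    int main(void)
    {
        unsigned long dst[2] = { 0, 0 };

        bitmap_from_u64_demo(dst, 0x123456789abcdef0ULL);
        printf("%lx %lx\n", dst[0], dst[1]);
        return 0;
    }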
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2c210b6a7bcf..e79055c8b577 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -882,7 +882,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
- if (unlikely(op == REQ_OP_DISCARD))
+ if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
if (unlikely(op == REQ_OP_WRITE_SAME))
@@ -913,7 +913,9 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
if (unlikely(rq->cmd_type != REQ_TYPE_FS))
return q->limits.max_hw_sectors;
- if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+ if (!q->limits.chunk_sectors ||
+ req_op(rq) == REQ_OP_DISCARD ||
+ req_op(rq) == REQ_OP_SECURE_ERASE)
return blk_queue_get_max_sectors(q, req_op(rq));
return min(blk_max_size_offset(q, offset),
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 701b64a3b7c5..89b65b82d98f 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -74,7 +74,8 @@ static inline void bvec_iter_advance(const struct bio_vec *bv,
"Attempted to advance past end of bvec iter\n");
while (bytes) {
- unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+ unsigned iter_len = bvec_iter_len(bv, *iter);
+ unsigned len = min(bytes, iter_len);
bytes -= len;
iter->bi_size -= len;
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5261751f6bd4..5f5270941ba0 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -32,6 +32,7 @@ enum can_mode {
* CAN common private data
*/
struct can_priv {
+ struct net_device *dev;
struct can_device_stats can_stats;
struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
- struct timer_list restart_timer;
+ struct delayed_work restart_work;
int (*do_set_bittiming)(struct net_device *dev);
int (*do_set_data_bittiming)(struct net_device *dev);
diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h
index 82c3d3b7269d..138bbf721e70 100644
--- a/include/linux/cec-funcs.h
+++ b/include/linux/cec-funcs.h
@@ -162,10 +162,11 @@ static inline void cec_msg_standby(struct cec_msg *msg)
/* One Touch Record Feature */
-static inline void cec_msg_record_off(struct cec_msg *msg)
+static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_RECORD_OFF;
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
}
struct cec_op_arib_data {
@@ -227,7 +228,7 @@ static inline void cec_set_digital_service_id(__u8 *msg,
if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
*msg++ = (digital->channel.channel_number_fmt << 2) |
(digital->channel.major >> 8);
- *msg++ = digital->channel.major && 0xff;
+ *msg++ = digital->channel.major & 0xff;
*msg++ = digital->channel.minor >> 8;
*msg++ = digital->channel.minor & 0xff;
*msg++ = 0;
@@ -323,6 +324,7 @@ static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
}
static inline void cec_msg_record_on(struct cec_msg *msg,
+ bool reply,
const struct cec_op_record_src *rec_src)
{
switch (rec_src->type) {
@@ -346,6 +348,7 @@ static inline void cec_msg_record_on(struct cec_msg *msg,
rec_src->ext_phys_addr.phys_addr);
break;
}
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
}
static inline void cec_ops_record_on(const struct cec_msg *msg,
@@ -1141,6 +1144,75 @@ static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
}
+static inline void cec_msg_vendor_command(struct cec_msg *msg,
+ __u8 size, const __u8 *vendor_cmd)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
+ memcpy(msg->msg + 2, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *vendor_cmd = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
+ __u32 vendor_id, __u8 size,
+ const __u8 *vendor_cmd)
+{
+ if (size > 11)
+ size = 11;
+ msg->len = 5 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+ msg->msg[2] = vendor_id >> 16;
+ msg->msg[3] = (vendor_id >> 8) & 0xff;
+ msg->msg[4] = vendor_id & 0xff;
+ memcpy(msg->msg + 5, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
+ __u32 *vendor_id, __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 5;
+
+ if (*size > 11)
+ *size = 11;
+ *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+ *vendor_cmd = msg->msg + 5;
+}
+
+static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
+ __u8 size,
+ const __u8 *rc_code)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
+ memcpy(msg->msg + 2, rc_code, size);
+}
+
+static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **rc_code)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *rc_code = msg->msg + 2;
+}
+
static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
{
msg->len = 2;
@@ -1277,7 +1349,7 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
msg->len += 4;
msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
(ui_cmd->channel_identifier.major >> 8);
- msg->msg[4] = ui_cmd->channel_identifier.major && 0xff;
+ msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
break;
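The new reply argument to cec_msg_record_off() and cec_msg_record_on() lets the
caller ask the CEC framework to wait for the RECORD_STATUS response instead of
fire-and-forget. A minimal sketch of a caller, assuming the usual cec_msg_init()
helper from this header (logical addresses chosen for illustration):

    /* Sketch: build a RECORD OFF message that requests RECORD_STATUS. */
    struct cec_msg msg;

    cec_msg_init(&msg, CEC_LOG_ADDR_PLAYBACK_1, CEC_LOG_ADDR_RECORD_1);
    cec_msg_record_off(&msg, true);   /* msg.reply == CEC_MSG_RECORD_STATUS */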
diff --git a/include/linux/cec.h b/include/linux/cec.h
index b3e22893a002..851968e803fa 100644
--- a/include/linux/cec.h
+++ b/include/linux/cec.h
@@ -364,7 +364,7 @@ struct cec_caps {
* @num_log_addrs: how many logical addresses should be claimed. Set by the
* caller.
* @vendor_id: the vendor ID of the device. Set by the caller.
- * @flags: set to 0.
+ * @flags: flags.
* @osd_name: the OSD name of the device. Set by the caller.
* @primary_device_type: the primary device type for each logical address.
* Set by the caller.
@@ -389,6 +389,9 @@ struct cec_log_addrs {
__u8 features[CEC_MAX_LOG_ADDRS][12];
};
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0)
+
/* Events */
/* Event that occurs when the adapter state changes */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index e2949397c19b..573c5a18908f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -158,7 +158,7 @@
#define __compiler_offsetof(a, b) \
__builtin_offsetof(a, b)
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+#if GCC_VERSION >= 40100
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
@@ -242,7 +242,11 @@
*/
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+/*
+ * sparse (__CHECKER__) pretends to be gcc, but can't do constant
+ * folding in __builtin_bswap*() (yet), so don't set these for it.
+ */
+#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
#if GCC_VERSION >= 40400
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
@@ -250,7 +254,7 @@
#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
#endif
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
#if GCC_VERSION >= 50000
#define KASAN_ABI_VERSION 4
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 1bb954842725..668569844d37 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -527,13 +527,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*
- * The seemingly unused void * variable is to validate @p is indeed a pointer
- * type. All pointer types silently cast to void *.
+ * The seemingly unused variable ___typecheck_p validates that @p is
+ * indeed a pointer type by using a pointer to typeof(*p) as the type.
+ * Taking a pointer to typeof(*p) again is needed in case p is void *.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = READ_ONCE(p); \
- __maybe_unused const void * const _________p2 = _________p1; \
+ typeof(*(p)) *___typecheck_p __maybe_unused; \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
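The replaced check relied on an implicit conversion to void *, which a literal 0
also satisfies; declaring a typeof(*p) * only compiles when p can actually be
dereferenced, and re-adding the pointer level keeps void * working. A
stand-alone sketch of the idiom (uses GCC/Clang typeof and statement-expression
extensions, like the kernel macro it mirrors):

    #include <stdio.h>

    /* Compiles only when p is a pointer (void * included); a plain
     * integer fails because typeof(*(p)) cannot dereference it.
     */
    #define assert_is_pointer(p) \
    ({ \
        __typeof__(*(p)) *__tp __attribute__((unused)); \
        (p); \
    })

    int main(void)
    {
        int x = 1;
        void *v = &x;

        assert_is_pointer(&x);  /* ok: __tp is int * */
        assert_is_pointer(v);   /* ok: typeof(*v) is void, void * is valid */
        /* assert_is_pointer(x);   would not compile: invalid use of *x */
        printf("ok\n");
        return 0;
    }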
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 797d9c8e9a1b..ad4f1f33a74e 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -228,7 +228,11 @@ static inline void cpu_hotplug_done(void) {}
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PM_SLEEP_SMP
-extern int disable_nonboot_cpus(void);
+extern int freeze_secondary_cpus(int primary);
+static inline int disable_nonboot_cpus(void)
+{
+ return freeze_secondary_cpus(0);
+}
extern void enable_nonboot_cpus(void);
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 242bf530edfc..eb445a4e2a83 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -1,6 +1,8 @@
#ifndef __CPUHOTPLUG_H
#define __CPUHOTPLUG_H
+#include <linux/types.h>
+
enum cpuhp_state {
CPUHP_OFFLINE,
CPUHP_CREATE_THREADS,
@@ -45,6 +47,8 @@ enum cpuhp_state {
CPUHP_AP_PERF_METAG_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
+ CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
+ CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
index 0a83a1e648b0..4db00b02ca3f 100644
--- a/include/linux/devfreq-event.h
+++ b/include/linux/devfreq-event.h
@@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
return -EINVAL;
}
-static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
-{
- return ERR_PTR(-EINVAL);
-}
-
static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
struct device *dev, int index)
{
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 66533e18276c..dc69df04abc1 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev,
#define dma_mmap_writecombine dma_mmap_wc
#endif
-#ifdef CONFIG_NEED_DMA_MAP_STATE
+#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 7f5a58225385..2d089487d2da 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -20,6 +20,7 @@
#include <linux/ioport.h>
#include <linux/pfn.h>
#include <linux/pstore.h>
+#include <linux/range.h>
#include <linux/reboot.h>
#include <linux/uuid.h>
#include <linux/screen_info.h>
@@ -37,6 +38,7 @@
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
typedef unsigned long efi_status_t;
@@ -118,6 +120,15 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
+struct efi_boot_memmap {
+ efi_memory_desc_t **map;
+ unsigned long *map_size;
+ unsigned long *desc_size;
+ u32 *desc_ver;
+ unsigned long *key_ptr;
+ unsigned long *buff_size;
+};
+
/*
* EFI capsule flags
*/
@@ -669,6 +680,18 @@ typedef struct {
unsigned long tables;
} efi_system_table_t;
+/*
+ * Architecture independent structure for describing a memory map for the
+ * benefit of efi_memmap_init_early(), saving us the need to pass four
+ * parameters.
+ */
+struct efi_memory_map_data {
+ phys_addr_t phys_map;
+ unsigned long size;
+ unsigned long desc_version;
+ unsigned long desc_size;
+};
+
struct efi_memory_map {
phys_addr_t phys_map;
void *map;
@@ -676,6 +699,12 @@ struct efi_memory_map {
int nr_map;
unsigned long desc_version;
unsigned long desc_size;
+ bool late;
+};
+
+struct efi_mem_range {
+ struct range range;
+ u64 attribute;
};
struct efi_fdt_params {
@@ -900,6 +929,16 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
}
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+
+extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
+extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
+extern void __init efi_memmap_unmap(void);
+extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
+extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
+ struct range *range);
+extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
+ void *buf, struct efi_mem_range *mem);
+
extern int efi_config_init(efi_config_table_type_t *arch_tables);
#ifdef CONFIG_EFI_ESRT
extern void __init efi_esrt_init(void);
@@ -915,6 +954,7 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
+extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_reserve_boot_services(void);
@@ -946,7 +986,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
- ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
+ (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
(md) = (void *)(md) + (m)->desc_size)
/**
@@ -1127,12 +1167,6 @@ struct efivar_operations {
};
struct efivars {
- /*
- * ->lock protects two things:
- * 1) efivarfs_list and efivars_sysfs_list
- * 2) ->ops calls
- */
- spinlock_t lock;
struct kset *kset;
struct kobject *kobject;
const struct efivar_operations *ops;
@@ -1273,8 +1307,8 @@ struct kobject *efivars_kobject(void);
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
void *data, bool duplicates, struct list_head *head);
-void efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-void efivar_entry_remove(struct efivar_entry *entry);
+int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
+int efivar_entry_remove(struct efivar_entry *entry);
int __efivar_entry_delete(struct efivar_entry *entry);
int efivar_entry_delete(struct efivar_entry *entry);
@@ -1291,7 +1325,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
bool block, unsigned long size, void *data);
-void efivar_entry_iter_begin(void);
+int efivar_entry_iter_begin(void);
void efivar_entry_iter_end(void);
int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
@@ -1327,7 +1361,6 @@ extern int efi_capsule_update(efi_capsule_header_t *capsule,
#ifdef CONFIG_EFI_RUNTIME_MAP
int efi_runtime_map_init(struct kobject *);
-void efi_runtime_map_setup(void *, int, u32);
int efi_get_runtime_map_size(void);
int efi_get_runtime_map_desc_size(void);
int efi_runtime_map_copy(void *buf, size_t bufsz);
@@ -1337,9 +1370,6 @@ static inline int efi_runtime_map_init(struct kobject *kobj)
return 0;
}
-static inline void
-efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
-
static inline int efi_get_runtime_map_size(void)
{
return 0;
@@ -1371,11 +1401,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
efi_loaded_image_t *image, int *cmd_line_len);
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- efi_memory_desc_t **map,
- unsigned long *map_size,
- unsigned long *desc_size,
- u32 *desc_ver,
- unsigned long *key_ptr);
+ struct efi_boot_memmap *map);
efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
@@ -1457,4 +1483,14 @@ extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
arch_efi_call_virt_teardown(); \
})
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+ efi_system_table_t *sys_table_arg,
+ struct efi_boot_memmap *map,
+ void *priv);
+
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
+ void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func);
#endif /* _LINUX_EFI_H */
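With efi_get_memory_map() collapsing five output pointers into struct
efi_boot_memmap, a stub-loader caller fills the struct once and passes it
around; the same struct is what efi_exit_boot_services() hands to its priv_func
callback while the final map is fetched. A sketch of the caller side (variable
names illustrative, error handling trimmed):

    /* Sketch: populate the new descriptor and fetch the memory map. */
    efi_memory_desc_t *memory_map;
    unsigned long map_size, desc_size, buff_size, mmap_key;
    u32 desc_ver;
    efi_status_t status;
    struct efi_boot_memmap map = {
            .map            = &memory_map,
            .map_size       = &map_size,
            .desc_size      = &desc_size,
            .desc_ver       = &desc_ver,
            .key_ptr        = &mmap_key,
            .buff_size      = &buff_size,
    };

    status = efi_get_memory_map(sys_table, &map);
    if (status != EFI_SUCCESS)
            return status;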
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 8cc719a63728..2ac6fa5f4712 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -49,8 +49,6 @@ struct fence_cb;
* @timestamp: Timestamp when the fence was signaled.
* @status: Optional, only valid if < 0, must be set before calling
* fence_signal, indicates that the fence has completed with an error.
- * @child_list: list of children fences
- * @active_list: list of active fences
*
* the flags member must be manipulated and read using the appropriate
* atomic ops (bit_*), so taking the spinlock will not be needed most
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3523bf62f328..901e25d495cc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -574,6 +574,7 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+#define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl *
uncached_acl_sentinel(struct task_struct *task)
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index cfa6cde25f8e..76cff18bb032 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -274,8 +274,7 @@ extern void fscrypt_restore_control_page(struct page *);
extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
unsigned int);
/* policy.c */
-extern int fscrypt_process_policy(struct inode *,
- const struct fscrypt_policy *);
+extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
@@ -345,7 +344,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
}
/* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct inode *i,
+static inline int fscrypt_notsupp_process_policy(struct file *f,
const struct fscrypt_policy *p)
{
return -EOPNOTSUPP;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 58205f33af02..7268ed076be8 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -148,6 +148,7 @@ struct fsnotify_group {
#define FS_PRIO_1 1 /* fanotify content based access control */
#define FS_PRIO_2 2 /* fanotify pre-content access */
unsigned int priority;
+ bool shutdown; /* group is being shut down, don't queue more events */
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
@@ -179,7 +180,6 @@ struct fsnotify_group {
spinlock_t access_lock;
struct list_head access_list;
wait_queue_head_t access_waitq;
- atomic_t bypass_perm;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
unsigned int max_marks;
@@ -292,6 +292,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
+/* group destruction begins, stop queuing new events */
+extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
/* destroy group */
extern void fsnotify_destroy_group(struct fsnotify_group *group);
/* fasync handler function */
@@ -304,8 +306,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
-/* Remove passed event from groups notification queue */
-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 7d565afe35d2..6f93ac46e7f0 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -795,7 +795,12 @@ struct ftrace_ret_stack {
unsigned long func;
unsigned long long calltime;
unsigned long long subtime;
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
unsigned long fp;
+#endif
+#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+ unsigned long *retp;
+#endif
};
/*
@@ -807,7 +812,10 @@ extern void return_to_handler(void);
extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
- unsigned long frame_pointer);
+ unsigned long frame_pointer, unsigned long *retp);
+
+unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
+ unsigned long ret, unsigned long *retp);
/*
* Sometimes we don't want to trace a function with the function
@@ -870,6 +878,13 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
return -1;
}
+static inline unsigned long
+ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
+ unsigned long *retp)
+{
+ return ret;
+}
+
static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index d2ba7d334039..1ffbf2a8cb99 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -304,6 +304,8 @@ struct tegra_mipi_device;
struct tegra_mipi_device *tegra_mipi_request(struct device *device);
void tegra_mipi_free(struct tegra_mipi_device *device);
+int tegra_mipi_enable(struct tegra_mipi_device *device);
+int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_calibrate(struct tegra_mipi_device *device);
#endif
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
new file mode 100644
index 000000000000..3fa5ef2b3759
--- /dev/null
+++ b/include/linux/hypervisor.h
@@ -0,0 +1,17 @@
+#ifndef __LINUX_HYPERVISOR_H
+#define __LINUX_HYPERVISOR_H
+
+/*
+ * Generic Hypervisor support
+ * Juergen Gross <jgross@suse.com>
+ */
+
+#ifdef CONFIG_HYPERVISOR_GUEST
+#include <asm/hypervisor.h>
+#else
+static inline void hypervisor_pin_vcpu(int cpu)
+{
+}
+#endif
+
+#endif /* __LINUX_HYPERVISOR_H */
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index 5198f8ed08a4..c97eab67558f 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -62,7 +62,7 @@ void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
const char *name,
struct config_item_type *type)
{
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
config_group_init_type_name(&t->group, name, type);
#endif
}
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index f8834f820ec2..325f649d77ff 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,6 +15,8 @@
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
+#include <asm/thread_info.h>
+
#ifdef CONFIG_SMP
# define INIT_PUSHABLE_TASKS(tsk) \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
@@ -183,12 +185,21 @@ extern struct task_group root_task_group;
# define INIT_KASAN(tsk)
#endif
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+# define INIT_TASK_TI(tsk) \
+ .thread_info = INIT_THREAD_INFO(tsk), \
+ .stack_refcount = ATOMIC_INIT(1),
+#else
+# define INIT_TASK_TI(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
#define INIT_TASK(tsk) \
{ \
+ INIT_TASK_TI(tsk) \
.state = 0, \
.stack = init_stack, \
.usage = ATOMIC_INIT(2), \
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 3267df461012..3d70ece10313 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -19,6 +19,11 @@ struct vm_fault;
#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
/*
+ * Flags for iomap mappings:
+ */
+#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */
+
+/*
* Magic value for blkno:
*/
#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
@@ -27,7 +32,8 @@ struct iomap {
sector_t blkno; /* 1st sector of mapping, 512b units */
loff_t offset; /* file offset of mapping, bytes */
u64 length; /* length of mapping, bytes */
- int type; /* type of mapping */
+ u16 type; /* type of mapping */
+ u16 flags; /* flags for mapping */
struct block_device *bdev; /* block device for I/O */
};
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b52424eaa0ed..0ac26c892fe2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -945,6 +945,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
+/*
+ * The irqsave variants are for usage in non interrupt code. Do not use
+ * them in irq_chip callbacks. Use irq_gc_lock() instead.
+ */
+#define irq_gc_lock_irqsave(gc, flags) \
+ raw_spin_lock_irqsave(&(gc)->lock, flags)
+
+#define irq_gc_unlock_irqrestore(gc, flags) \
+ raw_spin_unlock_irqrestore(&(gc)->lock, flags)
+
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 56b0b7ec66aa..99ac022edc60 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -337,6 +337,7 @@
*/
#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107
#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109
+#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307
#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507
#define E_ITS_MAPD_DEVICE_OOR 0x010801
#define E_ITS_MAPC_PROCNUM_OOR 0x010902
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 661af564fae8..a0547c571800 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -21,6 +21,8 @@
*
* DEFINE_STATIC_KEY_TRUE(key);
* DEFINE_STATIC_KEY_FALSE(key);
+ * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
+ * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
* static_branch_likely()
* static_branch_unlikely()
*
@@ -267,9 +269,25 @@ struct static_key_false {
#define DEFINE_STATIC_KEY_TRUE(name) \
struct static_key_true name = STATIC_KEY_TRUE_INIT
+#define DECLARE_STATIC_KEY_TRUE(name) \
+ extern struct static_key_true name
+
#define DEFINE_STATIC_KEY_FALSE(name) \
struct static_key_false name = STATIC_KEY_FALSE_INIT
+#define DECLARE_STATIC_KEY_FALSE(name) \
+ extern struct static_key_false name
+
+#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count) \
+ struct static_key_true name[count] = { \
+ [0 ... (count) - 1] = STATIC_KEY_TRUE_INIT, \
+ }
+
+#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count) \
+ struct static_key_false name[count] = { \
+ [0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \
+ }
+
extern bool ____wrong_branch_error(void);
#define static_key_enabled(x) \
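The new DEFINE_STATIC_KEY_ARRAY_TRUE/FALSE macros use a GCC range designator to
initialize every element, so a driver can keep one key per instance without
open-coding the initializers. A hypothetical use (all names invented for
illustration):

    /* Hypothetical: one default-off key per hardware queue. */
    DEFINE_STATIC_KEY_ARRAY_FALSE(queue_stats_keys, 8);

    static void queue_update_stats(int q);      /* hypothetical slow path */

    static void queue_poll(int q)
    {
            if (static_branch_unlikely(&queue_stats_keys[q]))
                    queue_update_stats(q);      /* patched in when enabled */
    }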
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d96a6118d26a..74fd6f05bc5b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -259,17 +259,14 @@ static inline void might_fault(void) { }
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(int state);
__printf(1, 2)
-void panic(const char *fmt, ...)
- __noreturn __cold;
+void panic(const char *fmt, ...) __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
extern int oops_may_print(void);
-void do_exit(long error_code)
- __noreturn;
-void complete_and_exit(struct completion *, long)
- __noreturn;
+void do_exit(long error_code) __noreturn;
+void complete_and_exit(struct completion *, long) __noreturn;
/* Internal, do not use. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 01e908ac4a39..9c28b4d4c90b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1113,9 +1113,21 @@ struct kvm_device {
/* create, destroy, and name are mandatory */
struct kvm_device_ops {
const char *name;
+
+ /*
+ * create is called holding kvm->lock and any operations not suitable
+ * to do while holding the lock should be deferred to init (see
+ * below).
+ */
int (*create)(struct kvm_device *dev, u32 type);
/*
+ * init is called after create if create is successful and is called
+ * outside of holding kvm->lock.
+ */
+ void (*init)(struct kvm_device *dev);
+
+ /*
* Destroy is responsible for freeing dev.
*
* Destroy may be called before or after destructors are called
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
deleted file mode 100644
index c92ebd100d9b..000000000000
--- a/include/linux/lglock.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Specialised local-global spinlock. Can only be declared as global variables
- * to avoid overhead and keep things simple (and we don't want to start using
- * these inside dynamically allocated structures).
- *
- * "local/global locks" (lglocks) can be used to:
- *
- * - Provide fast exclusive access to per-CPU data, with exclusive access to
- * another CPU's data allowed but possibly subject to contention, and to
- * provide very slow exclusive access to all per-CPU data.
- * - Or to provide very fast and scalable read serialisation, and to provide
- * very slow exclusive serialisation of data (not necessarily per-CPU data).
- *
- * Brlocks are also implemented as a short-hand notation for the latter use
- * case.
- *
- * Copyright 2009, 2010, Nick Piggin, Novell Inc.
- */
-#ifndef __LINUX_LGLOCK_H
-#define __LINUX_LGLOCK_H
-
-#include <linux/spinlock.h>
-#include <linux/lockdep.h>
-#include <linux/percpu.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-
-#ifdef CONFIG_SMP
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define LOCKDEP_INIT_MAP lockdep_init_map
-#else
-#define LOCKDEP_INIT_MAP(a, b, c, d)
-#endif
-
-struct lglock {
- arch_spinlock_t __percpu *lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lock_key;
- struct lockdep_map lock_dep_map;
-#endif
-};
-
-#define DEFINE_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
- struct lglock name = { .lock = &name ## _lock }
-
-#define DEFINE_STATIC_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
- static struct lglock name = { .lock = &name ## _lock }
-
-void lg_lock_init(struct lglock *lg, char *name);
-
-void lg_local_lock(struct lglock *lg);
-void lg_local_unlock(struct lglock *lg);
-void lg_local_lock_cpu(struct lglock *lg, int cpu);
-void lg_local_unlock_cpu(struct lglock *lg, int cpu);
-
-void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
-void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
-
-void lg_global_lock(struct lglock *lg);
-void lg_global_unlock(struct lglock *lg);
-
-#else
-/* When !CONFIG_SMP, map lglock to spinlock */
-#define lglock spinlock
-#define DEFINE_LGLOCK(name) DEFINE_SPINLOCK(name)
-#define DEFINE_STATIC_LGLOCK(name) static DEFINE_SPINLOCK(name)
-#define lg_lock_init(lg, name) spin_lock_init(lg)
-#define lg_local_lock spin_lock
-#define lg_local_unlock spin_unlock
-#define lg_local_lock_cpu(lg, cpu) spin_lock(lg)
-#define lg_local_unlock_cpu(lg, cpu) spin_unlock(lg)
-#define lg_global_lock spin_lock
-#define lg_global_unlock spin_unlock
-#endif
-
-#endif
diff --git a/include/linux/list.h b/include/linux/list.h
index 5183138aa932..5809e9a2de5b 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -381,8 +381,11 @@ static inline void list_splice_tail_init(struct list_head *list,
*
* Note that if the list is empty, it returns NULL.
*/
-#define list_first_entry_or_null(ptr, type, member) \
- (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#define list_first_entry_or_null(ptr, type, member) ({ \
+ struct list_head *head__ = (ptr); \
+ struct list_head *pos__ = READ_ONCE(head__->next); \
+ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
+})
/**
* list_next_entry - get the next element in list
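The rewritten macro reads head->next exactly once, through READ_ONCE(); the old
form evaluated it twice (inside list_empty() and again in list_first_entry()),
so a concurrent removal between the two loads could hand a lockless reader a
stale entry. A minimal sketch of the single-snapshot shape, assuming
<linux/list.h> (type and names illustrative; synchronization against writers is
still the caller's problem):

    /* Sketch: lockless peek is now a single snapshot of head->next. */
    struct request { int tag; struct list_head queuelist; };

    static struct request *peek_request(struct list_head *q)
    {
            /* NULL if empty; never re-reads q->next after the check */
            return list_first_entry_or_null(q, struct request, queuelist);
    }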
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 4429d255c8ab..5e5b2969d931 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -195,6 +195,7 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
}
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+extern void mpol_put_task_policy(struct task_struct *);
#else
@@ -297,5 +298,8 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
return -1; /* no node preference */
}
+static inline void mpol_put_task_policy(struct task_struct *task)
+{
+}
#endif /* CONFIG_NUMA */
#endif
diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h
new file mode 100644
index 000000000000..304985e288d2
--- /dev/null
+++ b/include/linux/mfd/da8xx-cfgchip.h
@@ -0,0 +1,153 @@
+/*
+ * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers.
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H
+#define __LINUX_MFD_DA8XX_CFGCHIP_H
+
+#include <linux/bitops.h>
+
+/* register offset (32-bit registers) */
+#define CFGCHIP(n) ((n) * 4)
+
+/* CFGCHIP0 (PLL0/EDMA3_0) register bits */
+#define CFGCHIP0_PLL_MASTER_LOCK BIT(4)
+#define CFGCHIP0_EDMA30TC1DBS(n) ((n) << 2)
+#define CFGCHIP0_EDMA30TC1DBS_MASK CFGCHIP0_EDMA30TC1DBS(0x3)
+#define CFGCHIP0_EDMA30TC1DBS_16 CFGCHIP0_EDMA30TC1DBS(0x0)
+#define CFGCHIP0_EDMA30TC1DBS_32 CFGCHIP0_EDMA30TC1DBS(0x1)
+#define CFGCHIP0_EDMA30TC1DBS_64 CFGCHIP0_EDMA30TC1DBS(0x2)
+#define CFGCHIP0_EDMA30TC0DBS(n) ((n) << 0)
+#define CFGCHIP0_EDMA30TC0DBS_MASK CFGCHIP0_EDMA30TC0DBS(0x3)
+#define CFGCHIP0_EDMA30TC0DBS_16 CFGCHIP0_EDMA30TC0DBS(0x0)
+#define CFGCHIP0_EDMA30TC0DBS_32 CFGCHIP0_EDMA30TC0DBS(0x1)
+#define CFGCHIP0_EDMA30TC0DBS_64 CFGCHIP0_EDMA30TC0DBS(0x2)
+
+/* CFGCHIP1 (eCAP/HPI/EDMA3_1/eHRPWM TBCLK/McASP0 AMUTEIN) register bits */
+#define CFGCHIP1_CAP2SRC(n) ((n) << 27)
+#define CFGCHIP1_CAP2SRC_MASK CFGCHIP1_CAP2SRC(0x1f)
+#define CFGCHIP1_CAP2SRC_ECAP_PIN CFGCHIP1_CAP2SRC(0x0)
+#define CFGCHIP1_CAP2SRC_MCASP0_TX CFGCHIP1_CAP2SRC(0x1)
+#define CFGCHIP1_CAP2SRC_MCASP0_RX CFGCHIP1_CAP2SRC(0x2)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP2SRC(0x7)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX CFGCHIP1_CAP2SRC(0x8)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_TX CFGCHIP1_CAP2SRC(0x9)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_MISC CFGCHIP1_CAP2SRC(0xa)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xb)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX CFGCHIP1_CAP2SRC(0xc)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_TX CFGCHIP1_CAP2SRC(0xd)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_MISC CFGCHIP1_CAP2SRC(0xe)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xf)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX CFGCHIP1_CAP2SRC(0x10)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_TX CFGCHIP1_CAP2SRC(0x11)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_MISC CFGCHIP1_CAP2SRC(0x12)
+#define CFGCHIP1_CAP1SRC(n) ((n) << 22)
+#define CFGCHIP1_CAP1SRC_MASK CFGCHIP1_CAP1SRC(0x1f)
+#define CFGCHIP1_CAP1SRC_ECAP_PIN CFGCHIP1_CAP1SRC(0x0)
+#define CFGCHIP1_CAP1SRC_MCASP0_TX CFGCHIP1_CAP1SRC(0x1)
+#define CFGCHIP1_CAP1SRC_MCASP0_RX CFGCHIP1_CAP1SRC(0x2)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP1SRC(0x7)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX CFGCHIP1_CAP1SRC(0x8)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_TX CFGCHIP1_CAP1SRC(0x9)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_MISC CFGCHIP1_CAP1SRC(0xa)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xb)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX CFGCHIP1_CAP1SRC(0xc)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_TX CFGCHIP1_CAP1SRC(0xd)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_MISC CFGCHIP1_CAP1SRC(0xe)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xf)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX CFGCHIP1_CAP1SRC(0x10)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_TX CFGCHIP1_CAP1SRC(0x11)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_MISC CFGCHIP1_CAP1SRC(0x12)
+#define CFGCHIP1_CAP0SRC(n) ((n) << 17)
+#define CFGCHIP1_CAP0SRC_MASK CFGCHIP1_CAP0SRC(0x1f)
+#define CFGCHIP1_CAP0SRC_ECAP_PIN CFGCHIP1_CAP0SRC(0x0)
+#define CFGCHIP1_CAP0SRC_MCASP0_TX CFGCHIP1_CAP0SRC(0x1)
+#define CFGCHIP1_CAP0SRC_MCASP0_RX CFGCHIP1_CAP0SRC(0x2)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP0SRC(0x7)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX CFGCHIP1_CAP0SRC(0x8)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_TX CFGCHIP1_CAP0SRC(0x9)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_MISC CFGCHIP1_CAP0SRC(0xa)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xb)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX CFGCHIP1_CAP0SRC(0xc)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_TX CFGCHIP1_CAP0SRC(0xd)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_MISC CFGCHIP1_CAP0SRC(0xe)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xf)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX CFGCHIP1_CAP0SRC(0x10)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_TX CFGCHIP1_CAP0SRC(0x11)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_MISC CFGCHIP1_CAP0SRC(0x12)
+#define CFGCHIP1_HPIBYTEAD BIT(16)
+#define CFGCHIP1_HPIENA BIT(15)
+#define CFGCHIP0_EDMA31TC0DBS(n) ((n) << 13)
+#define CFGCHIP0_EDMA31TC0DBS_MASK CFGCHIP0_EDMA31TC0DBS(0x3)
+#define CFGCHIP0_EDMA31TC0DBS_16 CFGCHIP0_EDMA31TC0DBS(0x0)
+#define CFGCHIP0_EDMA31TC0DBS_32 CFGCHIP0_EDMA31TC0DBS(0x1)
+#define CFGCHIP0_EDMA31TC0DBS_64 CFGCHIP0_EDMA31TC0DBS(0x2)
+#define CFGCHIP1_TBCLKSYNC BIT(12)
+#define CFGCHIP1_AMUTESEL0(n) ((n) << 0)
+#define CFGCHIP1_AMUTESEL0_MASK CFGCHIP1_AMUTESEL0(0xf)
+#define CFGCHIP1_AMUTESEL0_LOW CFGCHIP1_AMUTESEL0(0x0)
+#define CFGCHIP1_AMUTESEL0_BANK_0 CFGCHIP1_AMUTESEL0(0x1)
+#define CFGCHIP1_AMUTESEL0_BANK_1 CFGCHIP1_AMUTESEL0(0x2)
+#define CFGCHIP1_AMUTESEL0_BANK_2 CFGCHIP1_AMUTESEL0(0x3)
+#define CFGCHIP1_AMUTESEL0_BANK_3 CFGCHIP1_AMUTESEL0(0x4)
+#define CFGCHIP1_AMUTESEL0_BANK_4 CFGCHIP1_AMUTESEL0(0x5)
+#define CFGCHIP1_AMUTESEL0_BANK_5 CFGCHIP1_AMUTESEL0(0x6)
+#define CFGCHIP1_AMUTESEL0_BANK_6 CFGCHIP1_AMUTESEL0(0x7)
+#define CFGCHIP1_AMUTESEL0_BANK_7 CFGCHIP1_AMUTESEL0(0x8)
+
+/* CFGCHIP2 (USB PHY) register bits */
+#define CFGCHIP2_PHYCLKGD BIT(17)
+#define CFGCHIP2_VBUSSENSE BIT(16)
+#define CFGCHIP2_RESET BIT(15)
+#define CFGCHIP2_OTGMODE(n) ((n) << 13)
+#define CFGCHIP2_OTGMODE_MASK CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_OTGMODE_NO_OVERRIDE CFGCHIP2_OTGMODE(0x0)
+#define CFGCHIP2_OTGMODE_FORCE_HOST CFGCHIP2_OTGMODE(0x1)
+#define CFGCHIP2_OTGMODE_FORCE_DEVICE CFGCHIP2_OTGMODE(0x2)
+#define CFGCHIP2_OTGMODE_FORCE_HOST_VBUS_LOW CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_USB1PHYCLKMUX BIT(12)
+#define CFGCHIP2_USB2PHYCLKMUX BIT(11)
+#define CFGCHIP2_PHYPWRDN BIT(10)
+#define CFGCHIP2_OTGPWRDN BIT(9)
+#define CFGCHIP2_DATPOL BIT(8)
+#define CFGCHIP2_USB1SUSPENDM BIT(7)
+#define CFGCHIP2_PHY_PLLON BIT(6)
+#define CFGCHIP2_SESENDEN BIT(5)
+#define CFGCHIP2_VBDTCTEN BIT(4)
+#define CFGCHIP2_REFFREQ(n) ((n) << 0)
+#define CFGCHIP2_REFFREQ_MASK CFGCHIP2_REFFREQ(0xf)
+#define CFGCHIP2_REFFREQ_12MHZ CFGCHIP2_REFFREQ(0x1)
+#define CFGCHIP2_REFFREQ_24MHZ CFGCHIP2_REFFREQ(0x2)
+#define CFGCHIP2_REFFREQ_48MHZ CFGCHIP2_REFFREQ(0x3)
+#define CFGCHIP2_REFFREQ_19_2MHZ CFGCHIP2_REFFREQ(0x4)
+#define CFGCHIP2_REFFREQ_38_4MHZ CFGCHIP2_REFFREQ(0x5)
+#define CFGCHIP2_REFFREQ_13MHZ CFGCHIP2_REFFREQ(0x6)
+#define CFGCHIP2_REFFREQ_26MHZ CFGCHIP2_REFFREQ(0x7)
+#define CFGCHIP2_REFFREQ_20MHZ CFGCHIP2_REFFREQ(0x8)
+#define CFGCHIP2_REFFREQ_40MHZ CFGCHIP2_REFFREQ(0x9)
+
+/* CFGCHIP3 (EMAC/uPP/PLL1/ASYNC3/PRU/DIV4.5/EMIFA) register bits */
+#define CFGCHIP3_RMII_SEL BIT(8)
+#define CFGCHIP3_UPP_TX_CLKSRC BIT(6)
+#define CFGCHIP3_PLL1_MASTER_LOCK BIT(5)
+#define CFGCHIP3_ASYNC3_CLKSRC BIT(4)
+#define CFGCHIP3_PRUEVTSEL BIT(3)
+#define CFGCHIP3_DIV45PENA BIT(2)
+#define CFGCHIP3_EMA_CLKSRC BIT(1)
+
+/* CFGCHIP4 (McASP0 AMUNTEIN) register bits */
+#define CFGCHIP4_AMUTECLR0 BIT(0)
+
+#endif /* __LINUX_MFD_DA8XX_CFGCHIP_H */
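As the header comment says, these definitions are meant to be consumed through
syscon/regmap rather than raw MMIO. A hedged sketch of a consumer forcing the
USB OTG override (the "syscon" phandle property name and function name are
assumptions, not from this patch):

    #include <linux/err.h>
    #include <linux/mfd/da8xx-cfgchip.h>
    #include <linux/mfd/syscon.h>
    #include <linux/of.h>
    #include <linux/regmap.h>

    /* Sketch: force USB host mode via the CFGCHIP2 OTG override bits. */
    static int da8xx_force_usb_host(struct device_node *np)
    {
            struct regmap *cfgchip;

            cfgchip = syscon_regmap_lookup_by_phandle(np, "syscon");
            if (IS_ERR(cfgchip))
                    return PTR_ERR(cfgchip);

            return regmap_update_bits(cfgchip, CFGCHIP(2),
                                      CFGCHIP2_OTGMODE_MASK,
                                      CFGCHIP2_OTGMODE_FORCE_HOST);
    }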
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 2567a87872b0..7f55b8b41032 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -138,16 +138,16 @@
/*
* time in us for processing a single channel, calculated as follows:
*
- * num cycles = open delay + (sample delay + conv time) * averaging
+ * max num cycles = open delay + (sample delay + conv time) * averaging
*
- * num cycles: 152 + (1 + 13) * 16 = 376
+ * max num cycles: 262143 + (255 + 13) * 16 = 266431
*
* clock frequency: 26MHz / 8 = 3.25MHz
* clock period: 1 / 3.25MHz = 308ns
*
- * processing time: 376 * 308ns = 116us
+ * max processing time: 266431 * 308ns = 83ms(approx)
*/
-#define IDLE_TIMEOUT 116 /* microsec */
+#define IDLE_TIMEOUT 83 /* milliseconds */
#define TSCADC_CELLS 2
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 21bc4557b67a..d1f9a581aca8 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -6710,9 +6710,10 @@ struct mlx5_ifc_pude_reg_bits {
};
struct mlx5_ifc_ptys_reg_bits {
- u8 an_disable_cap[0x1];
+ u8 reserved_at_0[0x1];
u8 an_disable_admin[0x1];
- u8 reserved_at_2[0x6];
+ u8 an_disable_cap[0x1];
+ u8 reserved_at_3[0x5];
u8 local_port[0x8];
u8 reserved_at_10[0xd];
u8 proto_mask[0x3];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 08ed53eeedd5..5f14534f0c90 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2014,10 +2014,13 @@ extern void mm_drop_all_locks(struct mm_struct *mm);
extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
+extern struct file *get_task_exe_file(struct task_struct *task);
extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
+extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
+ const struct vm_special_mapping *sm);
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags,
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f2e4e90621ec..7f2ae99e5daf 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -68,8 +68,10 @@ extern char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
+# define is_migrate_cma_page(_page) false
#endif
#define for_each_migratetype_order(order, type) \
@@ -826,9 +828,21 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
-static inline int populated_zone(struct zone *zone)
+/*
+ * Returns true if a zone has pages managed by the buddy allocator.
+ * All the reclaim decisions have to use this function rather than
+ * populated_zone(). If the whole zone is reserved then we can easily
+ * end up with populated_zone() && !managed_zone().
+ */
+static inline bool managed_zone(struct zone *zone)
+{
+ return zone->managed_pages;
+}
+
+/* Returns true if a zone has memory */
+static inline bool populated_zone(struct zone *zone)
{
- return (!!zone->present_pages);
+ return zone->present_pages;
}
extern int movable_zone;
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index d351fd3e1049..e5fb81376e92 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -120,5 +120,5 @@ struct mfc_cache {
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
- struct rtmsg *rtm, int nowait);
+ struct rtmsg *rtm, int nowait, u32 portid);
#endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 3987b64040c5..19a1c0c2993b 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -116,7 +116,7 @@ struct mfc6_cache {
struct rtmsg;
extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
- struct rtmsg *rtm, int nowait);
+ struct rtmsg *rtm, int nowait, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 4f0bfe5912b2..e8c81fbd5f9c 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -270,6 +270,8 @@ enum {
MSI_FLAG_MULTI_PCI_MSI = (1 << 2),
/* Support PCI MSIX interrupts */
MSI_FLAG_PCI_MSIX = (1 << 3),
+ /* Needs early activate, required for PCI */
+ MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
};
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 076df5360ba5..e8d79d4ebcfe 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3267,6 +3267,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
napi->skb = NULL;
}
+bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data);
@@ -3891,8 +3892,7 @@ void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
-int dev_get_nest_level(struct net_device *dev,
- bool (*type_check)(const struct net_device *dev));
+int dev_get_nest_level(struct net_device *dev);
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 80ca889b164e..664da0048625 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -15,6 +15,6 @@ struct nf_acct;
struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
void nfnl_acct_put(struct nf_acct *acct);
void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
-extern int nfnl_acct_overquota(const struct sk_buff *skb,
- struct nf_acct *nfacct);
+int nfnl_acct_overquota(struct net *net, const struct sk_buff *skb,
+ struct nf_acct *nfacct);
#endif /* _NFNL_ACCT_H */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index d8b37bab2887..7676557ce357 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -794,7 +794,7 @@ struct nvmf_connect_command {
};
struct nvmf_connect_data {
- uuid_le hostid;
+ uuid_be hostid;
__le16 cntlid;
char resv4[238];
char subsysnqn[NVMF_NQN_FIELD_LEN];
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 66a1260b33de..01e84436cddf 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -571,56 +571,57 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
*/
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
- int ret = 0;
char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return ret;
+ return 0;
+ if (unlikely(uaddr > end))
+ return -EFAULT;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
- while (uaddr <= end) {
- ret = __put_user(0, uaddr);
- if (ret != 0)
- return ret;
+ do {
+ if (unlikely(__put_user(0, uaddr) != 0))
+ return -EFAULT;
uaddr += PAGE_SIZE;
- }
+ } while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK))
- ret = __put_user(0, end);
+ return __put_user(0, end);
- return ret;
+ return 0;
}
static inline int fault_in_multipages_readable(const char __user *uaddr,
int size)
{
volatile char c;
- int ret = 0;
const char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return ret;
+ return 0;
- while (uaddr <= end) {
- ret = __get_user(c, uaddr);
- if (ret != 0)
- return ret;
+ if (unlikely(uaddr > end))
+ return -EFAULT;
+
+ do {
+ if (unlikely(__get_user(c, uaddr) != 0))
+ return -EFAULT;
uaddr += PAGE_SIZE;
- }
+ } while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK)) {
- ret = __get_user(c, end);
- (void)c;
+ return __get_user(c, end);
}
- return ret;
+ (void)c;
+ return 0;
}
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
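
With the rework above, both helpers return 0 or -EFAULT directly and now also catch a range whose end wraps past the top of the address space. A hedged sketch of the usual call site, pre-faulting a user buffer before entering a no-fault context (buf and len are hypothetical):

	/* Sketch: pre-fault the user pages so a later atomic copy won't fault. */
	if (fault_in_multipages_readable(buf, len))
		return -EFAULT;
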
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 2599a980340f..a917d4b20554 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -683,15 +683,6 @@ struct pci_driver {
#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
/**
- * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
- * @_table: device table name
- *
- * This macro is deprecated and should not be used in new code.
- */
-#define DEFINE_PCI_DEVICE_TABLE(_table) \
- const struct pci_device_id _table[]
-
-/**
* PCI_DEVICE - macro used to describe a specific pci device
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
@@ -1135,6 +1126,7 @@ void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
int (*)(const struct pci_dev *, u8, u8));
+struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
#define HAVE_PCI_REQ_REGIONS 2
int __must_check pci_request_regions(struct pci_dev *, const char *);
int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
@@ -1251,10 +1243,12 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
-#define PCI_IRQ_NOLEGACY (1 << 0) /* don't use legacy interrupts */
-#define PCI_IRQ_NOMSI (1 << 1) /* don't use MSI interrupts */
-#define PCI_IRQ_NOMSIX (1 << 2) /* don't use MSI-X interrupts */
-#define PCI_IRQ_NOAFFINITY (1 << 3) /* don't auto-assign affinity */
+#define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */
+#define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */
+#define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */
+#define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */
+#define PCI_IRQ_ALL_TYPES \
+ (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
/* kmem_cache style wrapper around pci_alloc_consistent() */
@@ -1549,6 +1543,9 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
int enable)
{ return 0; }
+static inline struct resource *pci_find_resource(struct pci_dev *dev,
+ struct resource *res)
+{ return NULL; }
static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
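
The PCI_IRQ_* flags are now opt-in rather than opt-out; they are consumed by pci_alloc_irq_vectors(), which with PCI_IRQ_ALL_TYPES tries MSI-X, then MSI, then a single legacy IRQ. A minimal sketch (pdev, my_handler and priv are hypothetical; the device is assumed already enabled):

	/* Sketch: ask for 1..4 vectors of whatever type the platform supports. */
	int nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;
	err = request_irq(pci_irq_vector(pdev, 0), my_handler, 0, "mydev", priv);
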
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index c2fa3ecb0dce..5b2e6159b744 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -10,32 +10,122 @@
struct percpu_rw_semaphore {
struct rcu_sync rss;
- unsigned int __percpu *fast_read_ctr;
+ unsigned int __percpu *read_count;
struct rw_semaphore rw_sem;
- atomic_t slow_read_ctr;
- wait_queue_head_t write_waitq;
+ wait_queue_head_t writer;
+ int readers_block;
};
-extern void percpu_down_read(struct percpu_rw_semaphore *);
-extern int percpu_down_read_trylock(struct percpu_rw_semaphore *);
-extern void percpu_up_read(struct percpu_rw_semaphore *);
+#define DEFINE_STATIC_PERCPU_RWSEM(name) \
+static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \
+static struct percpu_rw_semaphore name = { \
+ .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \
+ .read_count = &__percpu_rwsem_rc_##name, \
+ .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
+ .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \
+}
+
+extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+extern void __percpu_up_read(struct percpu_rw_semaphore *);
+
+static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
+{
+ might_sleep();
+
+ rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+ preempt_disable();
+ /*
+ * We are in an RCU-sched read-side critical section, so the writer
+ * cannot both switch the rcu_sync state away from readers_fast and
+ * start checking counters while we are here. So if the fast path is
+ * still enabled (rcu_sync_is_idle()), we know that
+ * the writer won't be checking until we're past the preempt_enable()
+ * and that once the synchronize_sched() is done, the writer will see
+ * anything we did within this RCU-sched read-side critical section.
+ */
+ __this_cpu_inc(*sem->read_count);
+ if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ __percpu_down_read(sem, false); /* Unconditional memory barrier */
+ barrier();
+ /*
+ * The barrier() prevents the compiler from
+ * bleeding the critical section out.
+ */
+}
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+ percpu_down_read_preempt_disable(sem);
+ preempt_enable();
+}
+
+static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+{
+ int ret = 1;
+
+ preempt_disable();
+ /*
+ * Same as in percpu_down_read().
+ */
+ __this_cpu_inc(*sem->read_count);
+ if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+ preempt_enable();
+ /*
+ * The barrier() from preempt_enable() prevents the compiler from
+ * bleeding the critical section out.
+ */
+
+ if (ret)
+ rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+
+ return ret;
+}
+
+static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
+{
+ /*
+ * The barrier() prevents the compiler from
+ * bleeding the critical section out.
+ */
+ barrier();
+ /*
+ * Same as in percpu_down_read().
+ */
+ if (likely(rcu_sync_is_idle(&sem->rss)))
+ __this_cpu_dec(*sem->read_count);
+ else
+ __percpu_up_read(sem); /* Unconditional memory barrier */
+ preempt_enable();
+
+ rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+ preempt_disable();
+ percpu_up_read_preempt_enable(sem);
+}
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
const char *, struct lock_class_key *);
+
extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
-#define percpu_init_rwsem(brw) \
+#define percpu_init_rwsem(sem) \
({ \
static struct lock_class_key rwsem_key; \
- __percpu_init_rwsem(brw, #brw, &rwsem_key); \
+ __percpu_init_rwsem(sem, #sem, &rwsem_key); \
})
-
#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
+#define percpu_rwsem_assert_held(sem) \
+ lockdep_assert_held(&(sem)->rw_sem)
+
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
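
With the reader fast path now inline, a typical use pays only a per-CPU increment on the read side and pushes the serialization cost onto writers. A hedged sketch using the new static initializer (my_rwsem is hypothetical):

DEFINE_STATIC_PERCPU_RWSEM(my_rwsem);

	/* Read side: per-CPU increment plus a compiler barrier in the common case. */
	percpu_down_read(&my_rwsem);
	/* ... read-side critical section ... */
	percpu_up_read(&my_rwsem);

	/* Write side: flips rcu_sync, waits out readers, then takes rw_sem. */
	percpu_down_write(&my_rwsem);
	/* ... exclusive section ... */
	percpu_up_write(&my_rwsem);
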
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index e18843809eec..9ff07d3fc8de 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/perf_event.h>
-
+#include <linux/sysfs.h>
#include <asm/cputype.h>
/*
@@ -77,6 +77,13 @@ struct pmu_hw_events {
struct arm_pmu *percpu_pmu;
};
+enum armpmu_attr_groups {
+ ARMPMU_ATTR_GROUP_COMMON,
+ ARMPMU_ATTR_GROUP_EVENTS,
+ ARMPMU_ATTR_GROUP_FORMATS,
+ ARMPMU_NR_ATTR_GROUPS
+};
+
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
@@ -111,6 +118,8 @@ struct arm_pmu {
struct pmu_hw_events __percpu *hw_events;
struct list_head entry;
struct notifier_block cpu_pm_nb;
+ /* the attr_groups array must be NULL-terminated */
+ const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
@@ -151,6 +160,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table);
+#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
+
#endif /* CONFIG_ARM_PMU */
#endif /* __ARM_PMU_H__ */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8ed4326164cc..5c5362584aba 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -510,9 +510,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
struct perf_sample_data *,
struct pt_regs *regs);
-enum perf_group_flag {
- PERF_GROUP_SOFTWARE = 0x1,
-};
+/*
+ * Event capabilities. For event_caps and groups caps.
+ *
+ * PERF_EV_CAP_SOFTWARE: Is a software event.
+ * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
+ * from any CPU in the package where it is active.
+ */
+#define PERF_EV_CAP_SOFTWARE BIT(0)
+#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -568,7 +574,12 @@ struct perf_event {
struct hlist_node hlist_entry;
struct list_head active_entry;
int nr_siblings;
- int group_flags;
+
+ /* Not serialized. Only written during event initialization. */
+ int event_caps;
+ /* The cumulative AND of all event_caps for events in this group. */
+ int group_caps;
+
struct perf_event *group_leader;
struct pmu *pmu;
void *pmu_private;
@@ -743,7 +754,9 @@ struct perf_event_context {
u64 parent_gen;
u64 generation;
int pin_count;
+#ifdef CONFIG_CGROUP_PERF
int nr_cgroups; /* cgroup evts */
+#endif
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
};
@@ -769,7 +782,12 @@ struct perf_cpu_context {
unsigned int hrtimer_active;
struct pmu *unique_pmu;
+#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
+#endif
+
+ struct list_head sched_cb_entry;
+ int sched_cb_usage;
};
struct perf_output_handle {
@@ -981,7 +999,7 @@ static inline bool is_sampling_event(struct perf_event *event)
*/
static inline int is_software_event(struct perf_event *event)
{
- return event->pmu->task_ctx_nr == perf_sw_context;
+ return event->event_caps & PERF_EV_CAP_SOFTWARE;
}
extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
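
PERF_GROUP_SOFTWARE gives way to per-event capability bits, and is_software_event() now tests the capability instead of the PMU's context type. Where the bit gets set (core or driver) is outside this hunk; a hedged sketch of the mechanics only, with a hypothetical event_init:

static int my_sw_pmu_event_init(struct perf_event *event)
{
	/* Sketch: mark the event so is_software_event() and group_caps see it. */
	event->event_caps |= PERF_EV_CAP_SOFTWARE;
	return 0;
}
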
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 31fec858088c..a09fe5c009c8 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -51,6 +51,8 @@ struct generic_pm_domain {
struct mutex lock;
struct dev_power_governor *gov;
struct work_struct power_off_work;
+ struct fwnode_handle *provider; /* Identity of the domain provider */
+ bool has_provider;
const char *name;
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
@@ -116,7 +118,6 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
return to_gpd_data(dev->power.subsys_data->domain_data);
}
-extern struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev);
extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
struct gpd_timing_data *td);
@@ -129,6 +130,7 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target);
extern int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
+extern int pm_genpd_remove(struct generic_pm_domain *genpd);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
@@ -138,10 +140,6 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
-{
- return NULL;
-}
static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
struct gpd_timing_data *td)
@@ -168,6 +166,10 @@ static inline int pm_genpd_init(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
+static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
+{
+ return -ENOTSUPP;
+}
#endif
static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
@@ -192,57 +194,57 @@ struct genpd_onecell_data {
unsigned int num_domains;
};
-typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
- void *data);
-
#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
- void *data);
+int of_genpd_add_provider_simple(struct device_node *np,
+ struct generic_pm_domain *genpd);
+int of_genpd_add_provider_onecell(struct device_node *np,
+ struct genpd_onecell_data *data);
void of_genpd_del_provider(struct device_node *np);
-struct generic_pm_domain *of_genpd_get_from_provider(
- struct of_phandle_args *genpdspec);
-
-struct generic_pm_domain *__of_genpd_xlate_simple(
- struct of_phandle_args *genpdspec,
- void *data);
-struct generic_pm_domain *__of_genpd_xlate_onecell(
- struct of_phandle_args *genpdspec,
- void *data);
+extern int of_genpd_add_device(struct of_phandle_args *args,
+ struct device *dev);
+extern int of_genpd_add_subdomain(struct of_phandle_args *parent,
+ struct of_phandle_args *new_subdomain);
+extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int genpd_dev_pm_attach(struct device *dev);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
-static inline int __of_genpd_add_provider(struct device_node *np,
- genpd_xlate_t xlate, void *data)
+static inline int of_genpd_add_provider_simple(struct device_node *np,
+ struct generic_pm_domain *genpd)
{
- return 0;
+ return -ENOTSUPP;
}
-static inline void of_genpd_del_provider(struct device_node *np) {}
-static inline struct generic_pm_domain *of_genpd_get_from_provider(
- struct of_phandle_args *genpdspec)
+static inline int of_genpd_add_provider_onecell(struct device_node *np,
+ struct genpd_onecell_data *data)
{
- return NULL;
+ return -ENOTSUPP;
}
-#define __of_genpd_xlate_simple NULL
-#define __of_genpd_xlate_onecell NULL
+static inline void of_genpd_del_provider(struct device_node *np) {}
-static inline int genpd_dev_pm_attach(struct device *dev)
+static inline int of_genpd_add_device(struct of_phandle_args *args,
+ struct device *dev)
{
return -ENODEV;
}
-#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
-static inline int of_genpd_add_provider_simple(struct device_node *np,
- struct generic_pm_domain *genpd)
+static inline int of_genpd_add_subdomain(struct of_phandle_args *parent,
+ struct of_phandle_args *new_subdomain)
{
- return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd);
+ return -ENODEV;
}
-static inline int of_genpd_add_provider_onecell(struct device_node *np,
- struct genpd_onecell_data *data)
+
+static inline int genpd_dev_pm_attach(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline
+struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
- return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data);
+ return ERR_PTR(-ENOTSUPP);
}
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
#ifdef CONFIG_PM
extern int dev_pm_domain_attach(struct device *dev, bool power_on);
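
The opaque __of_genpd_add_provider()/xlate callback pair is replaced by typed registration helpers. A hedged sketch for a single-domain provider, with hypothetical my_pd and pdev and abbreviated error handling:

	/* Sketch: initialize one domain and expose it via the DT provider node. */
	static struct generic_pm_domain my_pd = { .name = "my_pd" };

	ret = pm_genpd_init(&my_pd, NULL, false);
	if (ret)
		return ret;
	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
	if (ret)
		pm_genpd_remove(&my_pd);
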
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 8dc155dab3ed..696a56be7d3e 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -266,39 +266,21 @@ extern asmlinkage void dump_stack(void) __cold;
* and other debug macros are compiled out unless either DEBUG is defined
* or CONFIG_DYNAMIC_DEBUG is set.
*/
-
-#ifdef CONFIG_PRINTK
-
-asmlinkage __printf(1, 2) __cold void __pr_emerg(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_alert(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_crit(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_err(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_warn(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_notice(const char *fmt, ...);
-asmlinkage __printf(1, 2) __cold void __pr_info(const char *fmt, ...);
-
-#define pr_emerg(fmt, ...) __pr_emerg(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...) __pr_alert(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...) __pr_crit(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...) __pr_err(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...) __pr_warn(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...) __pr_notice(pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...) __pr_info(pr_fmt(fmt), ##__VA_ARGS__)
-
-#else
-
-#define pr_emerg(fmt, ...) printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert(fmt, ...) printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit(fmt, ...) printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_notice(fmt, ...) printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-#endif
-
-#define pr_warning pr_warn
-
+#define pr_emerg(fmt, ...) \
+ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
+#define pr_notice(fmt, ...) \
+ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
/*
* Like KERN_CONT, pr_cont() should only be used when continuing
* a line with no newline ('\n') enclosed. Otherwise it defaults
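
Since these macros expand pr_fmt() at each call site, a file can prefix all of its messages by defining pr_fmt() before its first include; printk.h only supplies the pass-through default when nothing is defined. A common sketch (nr_widgets is hypothetical):

/* Sketch: give every pr_*() in this file a "mymod: " prefix. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/printk.h>

	pr_info("loaded, %d widgets\n", nr_widgets);	/* -> "mymod: loaded, ..." */
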
diff --git a/include/linux/property.h b/include/linux/property.h
index 3a2f9ae25c86..856e50b2140c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -190,7 +190,7 @@ struct property_entry {
.length = ARRAY_SIZE(_val_) * sizeof(_type_), \
.is_array = true, \
.is_string = false, \
- { .pointer = { _type_##_data = _val_ } }, \
+ { .pointer = { ._type_##_data = _val_ } }, \
}
#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index b1e3c57c7117..d6c4177df7cb 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -70,8 +70,16 @@ struct qed_dbcx_pfc_params {
u8 max_tc;
};
+enum qed_dcbx_sf_ieee_type {
+ QED_DCBX_SF_IEEE_ETHTYPE,
+ QED_DCBX_SF_IEEE_TCP_PORT,
+ QED_DCBX_SF_IEEE_UDP_PORT,
+ QED_DCBX_SF_IEEE_TCP_UDP_PORT
+};
+
struct qed_app_entry {
bool ethtype;
+ enum qed_dcbx_sf_ieee_type sf_ieee;
bool enabled;
u8 prio;
u16 proto_id;
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index a63a33e6196e..ece7ed9a4a70 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -59,6 +59,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
}
extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_enter_start(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1aa62e1a761b..321f9ed552a9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -334,6 +334,7 @@ void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
+void rcu_cpu_starting(unsigned int cpu);
#ifndef CONFIG_TINY_RCU
void rcu_end_inkernel_boot(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 62c68e513e39..7543a476178b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -448,6 +448,8 @@ static inline void io_schedule(void)
io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}
+void __noreturn do_task_dead(void);
+
struct nsproxy;
struct user_namespace;
@@ -1022,7 +1024,8 @@ extern void wake_up_q(struct wake_q_head *head);
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
-#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */
+#define SD_ASYM_CPUCAPACITY 0x0040 /* Groups have different max cpu capacities */
+#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu capacity */
#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
@@ -1064,6 +1067,12 @@ extern int sched_domain_level_max;
struct sched_group;
+struct sched_domain_shared {
+ atomic_t ref;
+ atomic_t nr_busy_cpus;
+ int has_idle_cores;
+};
+
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
@@ -1094,6 +1103,8 @@ struct sched_domain {
u64 max_newidle_lb_cost;
unsigned long next_decay_max_lb_cost;
+ u64 avg_scan_cost; /* select_idle_sibling */
+
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -1132,6 +1143,7 @@ struct sched_domain {
void *private; /* used during construction */
struct rcu_head rcu; /* used during destruction */
};
+ struct sched_domain_shared *shared;
unsigned int span_weight;
/*
@@ -1165,6 +1177,7 @@ typedef int (*sched_domain_flags_f)(void);
struct sd_data {
struct sched_domain **__percpu sd;
+ struct sched_domain_shared **__percpu sds;
struct sched_group **__percpu sg;
struct sched_group_capacity **__percpu sgc;
};
@@ -1458,6 +1471,13 @@ struct tlbflush_unmap_batch {
};
struct task_struct {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ /*
+ * For reasons of header soup (see current_thread_info()), this
+ * must be the first element of task_struct.
+ */
+ struct thread_info thread_info;
+#endif
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
atomic_t usage;
@@ -1467,6 +1487,9 @@ struct task_struct {
#ifdef CONFIG_SMP
struct llist_node wake_entry;
int on_cpu;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ unsigned int cpu; /* current CPU */
+#endif
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -1923,6 +1946,13 @@ struct task_struct {
#ifdef CONFIG_MMU
struct task_struct *oom_reaper_list;
#endif
+#ifdef CONFIG_VMAP_STACK
+ struct vm_struct *stack_vm_area;
+#endif
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ /* A live task holds one reference. */
+ atomic_t stack_refcount;
+#endif
/* CPU-specific state of this task */
struct thread_struct thread;
/*
@@ -1939,6 +1969,18 @@ extern int arch_task_struct_size __read_mostly;
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
+#ifdef CONFIG_VMAP_STACK
+static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
+{
+ return t->stack_vm_area;
+}
+#else
+static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
+{
+ return NULL;
+}
+#endif
+
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
@@ -2568,12 +2610,14 @@ static inline bool is_idle_task(const struct task_struct *p)
return p->pid == 0;
}
extern struct task_struct *curr_task(int cpu);
-extern void set_curr_task(int cpu, struct task_struct *p);
+extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
+#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
+#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
@@ -3061,10 +3105,34 @@ static inline void threadgroup_change_end(struct task_struct *tsk)
cgroup_threadgroup_change_end(tsk);
}
-#ifndef __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+
+static inline struct thread_info *task_thread_info(struct task_struct *task)
+{
+ return &task->thread_info;
+}
+
+/*
+ * When accessing the stack of a non-current task that might exit, use
+ * try_get_task_stack() instead. task_stack_page will return a pointer
+ * that could get freed out from under you.
+ */
+static inline void *task_stack_page(const struct task_struct *task)
+{
+ return task->stack;
+}
+
+#define setup_thread_stack(new,old) do { } while(0)
+
+static inline unsigned long *end_of_stack(const struct task_struct *task)
+{
+ return task->stack;
+}
+
+#elif !defined(__HAVE_THREAD_FUNCTIONS)
#define task_thread_info(task) ((struct thread_info *)(task)->stack)
-#define task_stack_page(task) ((task)->stack)
+#define task_stack_page(task) ((void *)(task)->stack)
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
@@ -3091,6 +3159,24 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
}
#endif
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+static inline void *try_get_task_stack(struct task_struct *tsk)
+{
+ return atomic_inc_not_zero(&tsk->stack_refcount) ?
+ task_stack_page(tsk) : NULL;
+}
+
+extern void put_task_stack(struct task_struct *tsk);
+#else
+static inline void *try_get_task_stack(struct task_struct *tsk)
+{
+ return task_stack_page(tsk);
+}
+
+static inline void put_task_stack(struct task_struct *tsk) {}
+#endif
+
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
@@ -3206,7 +3292,11 @@ static inline int signal_pending_state(long state, struct task_struct *p)
* cond_resched_lock() will drop the spinlock before scheduling,
* cond_resched_softirq() will enable bhs before scheduling.
*/
+#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
+#else
+static inline int _cond_resched(void) { return 0; }
+#endif
#define cond_resched() ({ \
___might_sleep(__FILE__, __LINE__, 0); \
@@ -3236,6 +3326,15 @@ static inline void cond_resched_rcu(void)
#endif
}
+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+ return p->preempt_disable_ip;
+#else
+ return 0;
+#endif
+}
+
/*
* Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPT,
@@ -3364,7 +3463,11 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
static inline unsigned int task_cpu(const struct task_struct *p)
{
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ return p->cpu;
+#else
return task_thread_info(p)->cpu;
+#endif
}
static inline int task_node(const struct task_struct *p)
@@ -3469,15 +3572,20 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
+#define SCHED_CPUFREQ_RT (1U << 0)
+#define SCHED_CPUFREQ_DL (1U << 1)
+#define SCHED_CPUFREQ_IOWAIT (1U << 2)
+
+#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
+
#ifdef CONFIG_CPU_FREQ
struct update_util_data {
- void (*func)(struct update_util_data *data,
- u64 time, unsigned long util, unsigned long max);
+ void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};
void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
- void (*func)(struct update_util_data *data, u64 time,
- unsigned long util, unsigned long max));
+ void (*func)(struct update_util_data *data, u64 time,
+ unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
#endif /* CONFIG_CPU_FREQ */
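
With CONFIG_THREAD_INFO_IN_TASK the stack becomes refcounted and can be freed before the task_struct, so code that inspects another task's stack must pin it first, as the comment above task_stack_page() warns. A hedged sketch (tsk is a hypothetical task pointer):

	/* Sketch: pin the stack of a possibly-exiting task before touching it. */
	void *stack = try_get_task_stack(tsk);
	if (stack) {
		/* ... safely read the stack ... */
		put_task_stack(tsk);
	}
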
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index de1f64318fc4..fcb4c3646173 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -705,70 +705,6 @@ typedef struct sctp_auth_chunk {
sctp_authhdr_t auth_hdr;
} __packed sctp_auth_chunk_t;
-struct sctp_info {
- __u32 sctpi_tag;
- __u32 sctpi_state;
- __u32 sctpi_rwnd;
- __u16 sctpi_unackdata;
- __u16 sctpi_penddata;
- __u16 sctpi_instrms;
- __u16 sctpi_outstrms;
- __u32 sctpi_fragmentation_point;
- __u32 sctpi_inqueue;
- __u32 sctpi_outqueue;
- __u32 sctpi_overall_error;
- __u32 sctpi_max_burst;
- __u32 sctpi_maxseg;
- __u32 sctpi_peer_rwnd;
- __u32 sctpi_peer_tag;
- __u8 sctpi_peer_capable;
- __u8 sctpi_peer_sack;
- __u16 __reserved1;
-
- /* assoc status info */
- __u64 sctpi_isacks;
- __u64 sctpi_osacks;
- __u64 sctpi_opackets;
- __u64 sctpi_ipackets;
- __u64 sctpi_rtxchunks;
- __u64 sctpi_outofseqtsns;
- __u64 sctpi_idupchunks;
- __u64 sctpi_gapcnt;
- __u64 sctpi_ouodchunks;
- __u64 sctpi_iuodchunks;
- __u64 sctpi_oodchunks;
- __u64 sctpi_iodchunks;
- __u64 sctpi_octrlchunks;
- __u64 sctpi_ictrlchunks;
-
- /* primary transport info */
- struct sockaddr_storage sctpi_p_address;
- __s32 sctpi_p_state;
- __u32 sctpi_p_cwnd;
- __u32 sctpi_p_srtt;
- __u32 sctpi_p_rto;
- __u32 sctpi_p_hbinterval;
- __u32 sctpi_p_pathmaxrxt;
- __u32 sctpi_p_sackdelay;
- __u32 sctpi_p_sackfreq;
- __u32 sctpi_p_ssthresh;
- __u32 sctpi_p_partial_bytes_acked;
- __u32 sctpi_p_flight_size;
- __u16 sctpi_p_error;
- __u16 __reserved2;
-
- /* sctp sock info */
- __u32 sctpi_s_autoclose;
- __u32 sctpi_s_adaptation_ind;
- __u32 sctpi_s_pd_point;
- __u8 sctpi_s_nodelay;
- __u8 sctpi_s_disable_fragments;
- __u8 sctpi_s_v4mapped;
- __u8 sctpi_s_frag_interleave;
- __u32 sctpi_s_type;
- __u32 __reserved3;
-};
-
struct sctp_infox {
struct sctp_info *sctpinfo;
struct sctp_association *asoc;
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 923266cd294a..48ec7651989b 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -111,7 +111,6 @@ struct uart_8250_port {
* if no_console_suspend
*/
unsigned char probe;
- struct mctrl_gpios *gpios;
#define UART_PROBE_RSA (1 << 0)
/*
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6f0b3e0adc73..0f665cb26b50 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2847,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
__skb_linearize(skb) : 0;
}
+static __always_inline void
+__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+ unsigned int off)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_block_sub(skb->csum,
+ csum_partial(start, len, 0), off);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
/**
* skb_postpull_rcsum - update checksum for received skb after pull
* @skb: buffer to update
@@ -2857,36 +2869,38 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
* update the CHECKSUM_COMPLETE checksum, or set ip_summed to
* CHECKSUM_NONE so that it can be recomputed from scratch.
*/
-
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
- else if (skb->ip_summed == CHECKSUM_PARTIAL &&
- skb_checksum_start_offset(skb) < 0)
- skb->ip_summed = CHECKSUM_NONE;
+ __skb_postpull_rcsum(skb, start, len, 0);
}
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+static __always_inline void
+__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+ unsigned int off)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_block_add(skb->csum,
+ csum_partial(start, len, 0), off);
+}
+/**
+ * skb_postpush_rcsum - update checksum for received skb after push
+ * @skb: buffer to update
+ * @start: start of data after push
+ * @len: length of data pushed
+ *
+ * After doing a push on a received packet, you need to call this to
+ * update the CHECKSUM_COMPLETE checksum.
+ */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- /* For performing the reverse operation to skb_postpull_rcsum(),
- * we can instead of ...
- *
- * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
- *
- * ... just use this equivalent version here to save a few
- * instructions. Feeding csum of 0 in csum_partial() and later
- * on adding skb->csum is equivalent to feed skb->csum in the
- * first place.
- */
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_partial(start, len, skb->csum);
+ __skb_postpush_rcsum(skb, start, len, 0);
}
+unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+
/**
* skb_push_rcsum - push skb and update receive checksum
* @skb: buffer to update
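
The new __-prefixed variants take a checksum offset for callers needing csum_block_sub()/csum_block_add() semantics; the existing wrappers keep their behavior by passing 0. The canonical no-offset pull path, as a hedged sketch (hlen is hypothetical):

	/* Sketch: pull a header and keep CHECKSUM_COMPLETE consistent. */
	const void *hdr = skb->data;
	__skb_pull(skb, hlen);
	skb_postpull_rcsum(skb, hdr, hlen);
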
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1a4ea551aae5..4293808d8cfb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -155,6 +155,18 @@ void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+ unsigned long n,
+ struct page *page)
+{
+ return NULL;
+}
+#endif
+
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h
index 76199b75d584..e302c447e057 100644
--- a/include/linux/smc91x.h
+++ b/include/linux/smc91x.h
@@ -1,6 +1,16 @@
#ifndef __SMC91X_H__
#define __SMC91X_H__
+/*
+ * These bits define which access sizes a platform can support, rather
+ * than the maximal access size. So, if your platform can do 16-bit
+ * and 32-bit accesses to the SMC91x device, but not 8-bit, set both
+ * SMC91X_USE_16BIT and SMC91X_USE_32BIT.
+ *
+ * The SMC91x driver requires at least one of SMC91X_USE_8BIT or
+ * SMC91X_USE_16BIT to be supported - just setting SMC91X_USE_32BIT is
+ * an invalid configuration.
+ */
#define SMC91X_USE_8BIT (1 << 0)
#define SMC91X_USE_16BIT (1 << 1)
#define SMC91X_USE_32BIT (1 << 2)
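
In board files these bits land in the flags field of struct smc91x_platdata. A hedged example for a platform wired for 8-bit and 16-bit access only:

/* Sketch: platform data advertising 8-bit and 16-bit access support. */
static struct smc91x_platdata board_smc91x_pdata = {
	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT,
};
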
diff --git a/include/linux/smp.h b/include/linux/smp.h
index eccae4690f41..8e0cb7a0f836 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -196,6 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void);
void smp_setup_processor_id(void);
+int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
+ bool phys);
+
/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
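
smp_call_on_cpu() runs func synchronously on the given CPU from sleepable context and returns its result; when phys is true the caller additionally asks for the physical CPU (useful for firmware calls under hypervisors). A hedged sketch (my_setup and arg are hypothetical):

	/* Sketch: run my_setup(arg) on CPU 'cpu' and collect its return value. */
	ret = smp_call_on_cpu(cpu, my_setup, arg, false);
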
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index b6810c92b8bb..5c02b0691587 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -195,6 +195,8 @@ int rpc_clnt_add_xprt(struct rpc_clnt *, struct xprt_create *,
struct rpc_xprt *,
void *),
void *data);
+void rpc_cap_max_reconnect_timeout(struct rpc_clnt *clnt,
+ unsigned long timeo);
const char *rpc_proc_name(const struct rpc_task *task);
#endif /* __KERNEL__ */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 5e3e1b63dbb3..a16070dd03ee 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -218,7 +218,8 @@ struct rpc_xprt {
struct work_struct task_cleanup;
struct timer_list timer;
unsigned long last_used,
- idle_timeout;
+ idle_timeout,
+ max_reconnect_timeout;
/*
* Send stuff
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 7693e39b14fe..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -245,6 +245,7 @@ static inline bool idle_should_freeze(void)
return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
}
+extern void __init pm_states_init(void);
extern void freeze_set_ops(const struct platform_freeze_ops *ops);
extern void freeze_wake(void);
@@ -279,6 +280,7 @@ static inline bool pm_resume_via_firmware(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
static inline bool idle_should_freeze(void) { return false; }
+static inline void __init pm_states_init(void) {}
static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
static inline void freeze_wake(void) {}
#endif /* !CONFIG_SUSPEND */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b17cc4830fa6..4a529c984a3f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -257,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
static inline void workingset_node_pages_dec(struct radix_tree_node *node)
{
+ VM_BUG_ON(!workingset_node_pages(node));
node->count--;
}
@@ -272,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
{
+ VM_BUG_ON(!workingset_node_shadows(node));
node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
}
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 697e160c78d0..a4f7203a9017 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -42,6 +42,8 @@ extern int proc_dostring(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
+extern int proc_douintvec(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
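
proc_douintvec parses and stores the value as an unsigned int, so writes between INT_MAX+1 and UINT_MAX succeed where proc_dointvec would reject them. A hedged ctl_table sketch (my_knob and the table are hypothetical):

/* Sketch: expose an unsigned int tunable through sysctl. */
static unsigned int my_knob;
static struct ctl_table my_table[] = {
	{
		.procname	= "my_knob",
		.data		= &my_knob,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},
	{ }
};
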
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 352b1542f5cc..45f004e9cc59 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -13,6 +13,21 @@
struct timespec;
struct compat_timespec;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+struct thread_info {
+ unsigned long flags; /* low level flags */
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = 0, \
+}
+#endif
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+#define current_thread_info() ((struct thread_info *)current)
+#endif
+
/*
* System call restart block.
*/
@@ -105,6 +120,31 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+ const void * const stackend,
+ const void *obj, unsigned long len)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+ bool to_user);
+
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{
+ if (!__builtin_constant_p(n))
+ __check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
#endif /* __KERNEL__ */
#endif /* _LINUX_THREAD_INFO_H */
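
check_object_size() is meant to sit on the user-copy paths: constant sizes are skipped because they can be vetted at compile time, while runtime sizes fall through to __check_object_size(). A hedged sketch of a wrapper a caller might use (copy_to_user_checked is hypothetical; real arch code differs):

static inline unsigned long copy_to_user_checked(void __user *to,
						 const void *from,
						 unsigned long n)
{
	/* Sketch: vet the kernel object before the actual copy. */
	check_object_size(from, n, true);
	return __copy_to_user(to, from, n);
}
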
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 6685a73736a2..a45702eb3e7b 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -43,7 +43,7 @@
#define TORTURE_FLAG "-torture:"
#define TOROUT_STRING(s) \
- pr_alert("%s" TORTURE_FLAG s "\n", torture_type)
+ pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s)
#define VERBOSE_TOROUT_STRING(s) \
do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
#define VERBOSE_TOROUT_ERRSTRING(s) \
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index d3a2bb712af3..650f3dd6b800 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -103,31 +103,42 @@ static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
#endif
}
-static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_begin(&syncp->seq);
#else
-#if BITS_PER_LONG==32
- preempt_disable();
-#endif
return 0;
#endif
}
-static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+ preempt_disable();
+#endif
+ return __u64_stats_fetch_begin(syncp);
+}
+
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
return read_seqcount_retry(&syncp->seq, start);
#else
-#if BITS_PER_LONG==32
- preempt_enable();
-#endif
return false;
#endif
}
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+ preempt_enable();
+#endif
+ return __u64_stats_fetch_retry(syncp, start);
+}
+
/*
* In case irq handlers can update u64 counters, readers can use the following helpers
* - SMP 32bit arches use seqcount protection, irq safe.
@@ -136,27 +147,19 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
*/
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- return read_seqcount_begin(&syncp->seq);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
local_irq_disable();
#endif
- return 0;
-#endif
+ return __u64_stats_fetch_begin(syncp);
}
static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
- unsigned int start)
+ unsigned int start)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- return read_seqcount_retry(&syncp->seq, start);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
local_irq_enable();
#endif
- return false;
-#endif
+ return __u64_stats_fetch_retry(syncp, start);
}
#endif /* _LINUX_U64_STATS_SYNC_H */
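
The reader pattern is unchanged by this refactor; the __-prefixed variants merely split the seqcount handling from the preempt/irq management so the two can be composed. The usual retry loop, as a hedged sketch (the stats structure is hypothetical):

	/* Sketch: snapshot a 64-bit counter consistently, even on 32-bit SMP. */
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		packets = stats->packets;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
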
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 349557825428..f30c187ed785 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -114,8 +114,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr) __get_user(x, ptr)
-#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
#endif /* __LINUX_UACCESS_H__ */
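
unsafe_get_user()/unsafe_put_user() now take an error label, letting a run of accesses inside a user_access_begin()/user_access_end() section share one failure path instead of checking each return value. A hedged sketch (uptr, a, b and the surrounding function are hypothetical):

	/* Sketch: two user accesses sharing a single fault label. */
	user_access_begin();
	unsafe_get_user(a, &uptr->a, efault);
	unsafe_get_user(b, &uptr->b, efault);
	user_access_end();
	return 0;

efault:
	user_access_end();
	return -EFAULT;
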
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 1b5d1cd796e2..75b4aaf31a9d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
+#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index c3ff74d764fa..2408e8d5c05c 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -248,6 +248,8 @@ wait_queue_head_t *bit_waitqueue(void *, int);
(!__builtin_constant_p(state) || \
state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
+extern void init_wait_entry(wait_queue_t *__wait, int flags);
+
/*
* The below macro ___wait_event() has an explicit shadow of the __ret
* variable when used from the wait_event_*() macros.
@@ -266,12 +268,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
wait_queue_t __wait; \
long __ret = ret; /* explicit shadow */ \
\
- INIT_LIST_HEAD(&__wait.task_list); \
- if (exclusive) \
- __wait.flags = WQ_FLAG_EXCLUSIVE; \
- else \
- __wait.flags = 0; \
- \
+ init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
for (;;) { \
long __int = prepare_to_wait_event(&wq, &__wait, state);\
\
@@ -280,12 +277,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
\
if (___wait_is_interruptible(state) && __int) { \
__ret = __int; \
- if (exclusive) { \
- abort_exclusive_wait(&wq, &__wait, \
- state, NULL); \
- goto __out; \
- } \
- break; \
+ goto __out; \
} \
\
cmd; \
@@ -989,7 +981,6 @@ void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);