author	Alexandre Oliva <lxoliva@fsfla.org>	2018-01-20 09:23:18 +0000
committer	Alexandre Oliva <lxoliva@fsfla.org>	2018-01-20 09:23:18 +0000
commit	b925f623020d921032ce2fc09af8752b3bc469d3 (patch)
tree	54243499f699943cbdbb0c56e8db5f78713f6f7f
parent	7a38fe51191195b46183dd0588983d1d6a9d6dc7 (diff)
download	linux-libre-raptor-b925f623020d921032ce2fc09af8752b3bc469d3.tar.gz
	linux-libre-raptor-b925f623020d921032ce2fc09af8752b3bc469d3.zip
4.14.14-300.fc27.gnu
-rw-r--r--	freed-ora/current/f27/0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch	66
-rw-r--r--	freed-ora/current/f27/0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch	58
-rw-r--r--	freed-ora/current/f27/0002-sysfs-cpu-Add-vulnerability-folder.patch	154
-rw-r--r--	freed-ora/current/f27/0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch	82
-rw-r--r--	freed-ora/current/f27/cgroup-for-4.15-fixes-cgroup-fix-css_task_iter-crash-on-CSS_TASK_ITER_PROC.patch	132
-rw-r--r--	freed-ora/current/f27/e1000e-Fix-e1000_check_for_copper_link_ich8lan-return-value..patch	70
-rw-r--r--	freed-ora/current/f27/kernel.spec	33
-rw-r--r--	freed-ora/current/f27/loop-fix-concurrent-lo_open-lo_release.patch	55
-rw-r--r--	freed-ora/current/f27/patch-4.14-gnu-4.14.13-gnu.xz.sign	6
-rw-r--r--	freed-ora/current/f27/patch-4.14-gnu-4.14.14-gnu.xz.sign	6
-rw-r--r--	freed-ora/current/f27/ppc-mitigations.patch	1309
-rw-r--r--	freed-ora/current/f27/retpoline.patch	1480
-rw-r--r--	freed-ora/current/f27/sources	2
-rw-r--r--	freed-ora/current/f27/v4-KVM-Fix-stack-out-of-bounds-read-in-write_mmio.patch	215
14 files changed, 1390 insertions, 2278 deletions
diff --git a/freed-ora/current/f27/0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch b/freed-ora/current/f27/0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch
deleted file mode 100644
index b44c184d9..000000000
--- a/freed-ora/current/f27/0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From e4d0e84e490790798691aaa0f2e598637f1867ec Mon Sep 17 00:00:00 2001
-From: Tom Lendacky <thomas.lendacky@amd.com>
-Date: Mon, 8 Jan 2018 16:09:21 -0600
-Subject: [PATCH 1/2] x86/cpu/AMD: Make LFENCE a serializing instruction
-
-To aid in speculation control, make LFENCE a serializing instruction
-since it has less overhead than MFENCE. This is done by setting bit 1
-of MSR 0xc0011029 (DE_CFG). Some families that support LFENCE do not
-have this MSR. For these families, the LFENCE instruction is already
-serializing.
-
-Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Dan Williams <dan.j.williams@intel.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: David Woodhouse <dwmw@amazon.co.uk>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/20180108220921.12580.71694.stgit@tlendack-t1.amdoffice.net
----
- arch/x86/include/asm/msr-index.h | 2 ++
- arch/x86/kernel/cpu/amd.c | 10 ++++++++++
- 2 files changed, 12 insertions(+)
-
-diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
-index ab022618a50a..1e7d710fef43 100644
---- a/arch/x86/include/asm/msr-index.h
-+++ b/arch/x86/include/asm/msr-index.h
-@@ -352,6 +352,8 @@
- #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
- #define FAM10H_MMIO_CONF_BASE_SHIFT 20
- #define MSR_FAM10H_NODE_ID 0xc001100c
-+#define MSR_F10H_DECFG 0xc0011029
-+#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
-
- /* K8 MSRs */
- #define MSR_K8_TOP_MEM1 0xc001001a
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index bcb75dc97d44..5b438d81beb2 100644
---- a/arch/x86/kernel/cpu/amd.c
-+++ b/arch/x86/kernel/cpu/amd.c
-@@ -829,6 +829,16 @@ static void init_amd(struct cpuinfo_x86 *c)
- set_cpu_cap(c, X86_FEATURE_K8);
-
- if (cpu_has(c, X86_FEATURE_XMM2)) {
-+ /*
-+ * A serializing LFENCE has less overhead than MFENCE, so
-+ * use it for execution serialization. On families which
-+ * don't have that MSR, LFENCE is already serializing.
-+ * msr_set_bit() uses the safe accessors, too, even if the MSR
-+ * is not present.
-+ */
-+ msr_set_bit(MSR_F10H_DECFG,
-+ MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
-+
- /* MFENCE stops RDTSC speculation */
- set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
- }
---
-2.14.3
-
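
For reference, the DE_CFG bit that this (now-dropped) patch sets is easy to inspect from userspace. A minimal sketch, assuming the msr driver is loaded (modprobe msr) and root privileges; the constants mirror the patch, the rest of the harness is illustrative:

    #include <fcntl.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_F10H_DECFG                  0xc0011029
    #define MSR_F10H_DECFG_LFENCE_SERIALIZE (1ULL << 1)

    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);  /* needs msr.ko + root */

        if (fd < 0) {
            perror("open /dev/cpu/0/msr");
            return 1;
        }
        /* The msr driver maps the file offset to the MSR number. */
        if (pread(fd, &val, sizeof(val), MSR_F10H_DECFG) != sizeof(val)) {
            perror("rdmsr");  /* families without DE_CFG fault here */
            close(fd);
            return 1;
        }
        printf("DE_CFG = %#" PRIx64 ", LFENCE serializing: %s\n", val,
               (val & MSR_F10H_DECFG_LFENCE_SERIALIZE) ? "yes" : "no");
        close(fd);
        return 0;
    }
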
diff --git a/freed-ora/current/f27/0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch b/freed-ora/current/f27/0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch
deleted file mode 100644
index e358c16f9..000000000
--- a/freed-ora/current/f27/0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch
+++ /dev/null
@@ -1,58 +0,0 @@
-From 99c6fa2511d8a683e61468be91b83f85452115fa Mon Sep 17 00:00:00 2001
-From: David Woodhouse <dwmw@amazon.co.uk>
-Date: Sat, 6 Jan 2018 11:49:23 +0000
-Subject: [PATCH 1/2] x86/cpufeatures: Add X86_BUG_SPECTRE_V[12]
-
-Add the bug bits for spectre v1/2 and force them unconditionally for all
-cpus.
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Cc: stable@vger.kernel.org
-Link: https://lkml.kernel.org/r/1515239374-23361-2-git-send-email-dwmw@amazon.co.uk
----
- arch/x86/include/asm/cpufeatures.h | 2 ++
- arch/x86/kernel/cpu/common.c | 3 +++
- 2 files changed, 5 insertions(+)
-
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
-index 21ac898df2d8..1641c2f96363 100644
---- a/arch/x86/include/asm/cpufeatures.h
-+++ b/arch/x86/include/asm/cpufeatures.h
-@@ -342,5 +342,7 @@
- #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
- #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
- #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
-+#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
-+#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
-
- #endif /* _ASM_X86_CPUFEATURES_H */
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 2d3bd2215e5b..372ba3fb400f 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -902,6 +902,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
- if (c->x86_vendor != X86_VENDOR_AMD)
- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-
-+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
-+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-+
- fpu__init_system(c);
-
- #ifdef CONFIG_X86_32
---
-2.14.3
-
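
The bug bits forced above surface in the "bugs" field of /proc/cpuinfo, with display names derived from the X86_BUG_* macros. A minimal sketch of checking for them from userspace:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[1024];
        FILE *f = fopen("/proc/cpuinfo", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        while (fgets(line, sizeof(line), f)) {
            if (strncmp(line, "bugs", 4) != 0)
                continue;
            /* e.g. "bugs : cpu_meltdown spectre_v1 spectre_v2" */
            printf("spectre_v1: %s\n",
                   strstr(line, "spectre_v1") ? "flagged" : "not flagged");
            printf("spectre_v2: %s\n",
                   strstr(line, "spectre_v2") ? "flagged" : "not flagged");
            break;
        }
        fclose(f);
        return 0;
    }
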
diff --git a/freed-ora/current/f27/0002-sysfs-cpu-Add-vulnerability-folder.patch b/freed-ora/current/f27/0002-sysfs-cpu-Add-vulnerability-folder.patch
deleted file mode 100644
index 8f1ae3a6a..000000000
--- a/freed-ora/current/f27/0002-sysfs-cpu-Add-vulnerability-folder.patch
+++ /dev/null
@@ -1,154 +0,0 @@
-From 87590ce6e373d1a5401f6539f0c59ef92dd924a9 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sun, 7 Jan 2018 22:48:00 +0100
-Subject: [PATCH 2/2] sysfs/cpu: Add vulnerability folder
-
-As the meltdown/spectre problem affects several CPU architectures, it makes
-sense to have common way to express whether a system is affected by a
-particular vulnerability or not. If affected the way to express the
-mitigation should be common as well.
-
-Create /sys/devices/system/cpu/vulnerabilities folder and files for
-meltdown, spectre_v1 and spectre_v2.
-
-Allow architectures to override the show function.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Will Deacon <will.deacon@arm.com>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Linus Torvalds <torvalds@linuxfoundation.org>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: David Woodhouse <dwmw@amazon.co.uk>
-Link: https://lkml.kernel.org/r/20180107214913.096657732@linutronix.de
----
- Documentation/ABI/testing/sysfs-devices-system-cpu | 16 ++++++++
- drivers/base/Kconfig | 3 ++
- drivers/base/cpu.c | 48 ++++++++++++++++++++++
- include/linux/cpu.h | 7 ++++
- 4 files changed, 74 insertions(+)
-
-diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
-index f3d5817c4ef0..bd3a88e16d8b 100644
---- a/Documentation/ABI/testing/sysfs-devices-system-cpu
-+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
-@@ -373,3 +373,19 @@ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
- Description: information about CPUs heterogeneity.
-
- cpu_capacity: capacity of cpu#.
-+
-+What: /sys/devices/system/cpu/vulnerabilities
-+ /sys/devices/system/cpu/vulnerabilities/meltdown
-+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
-+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
-+Date: January 2018
-+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
-+Description: Information about CPU vulnerabilities
-+
-+ The files are named after the code names of CPU
-+ vulnerabilities. The output of those files reflects the
-+ state of the CPUs in the system. Possible output values:
-+
-+ "Not affected" CPU is not affected by the vulnerability
-+ "Vulnerable" CPU is affected and no mitigation in effect
-+ "Mitigation: $M" CPU is affetcted and mitigation $M is in effect
-diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
-index 2f6614c9a229..37a71fd9043f 100644
---- a/drivers/base/Kconfig
-+++ b/drivers/base/Kconfig
-@@ -235,6 +235,9 @@ config GENERIC_CPU_DEVICES
- config GENERIC_CPU_AUTOPROBE
- bool
-
-+config GENERIC_CPU_VULNERABILITIES
-+ bool
-+
- config SOC_BUS
- bool
- select GLOB
-diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
-index 321cd7b4d817..825964efda1d 100644
---- a/drivers/base/cpu.c
-+++ b/drivers/base/cpu.c
-@@ -501,10 +501,58 @@ static void __init cpu_dev_register_generic(void)
- #endif
- }
-
-+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
-+
-+ssize_t __weak cpu_show_meltdown(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "Not affected\n");
-+}
-+
-+ssize_t __weak cpu_show_spectre_v1(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "Not affected\n");
-+}
-+
-+ssize_t __weak cpu_show_spectre_v2(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ return sprintf(buf, "Not affected\n");
-+}
-+
-+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
-+static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
-+static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
-+
-+static struct attribute *cpu_root_vulnerabilities_attrs[] = {
-+ &dev_attr_meltdown.attr,
-+ &dev_attr_spectre_v1.attr,
-+ &dev_attr_spectre_v2.attr,
-+ NULL
-+};
-+
-+static const struct attribute_group cpu_root_vulnerabilities_group = {
-+ .name = "vulnerabilities",
-+ .attrs = cpu_root_vulnerabilities_attrs,
-+};
-+
-+static void __init cpu_register_vulnerabilities(void)
-+{
-+ if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
-+ &cpu_root_vulnerabilities_group))
-+ pr_err("Unable to register CPU vulnerabilities\n");
-+}
-+
-+#else
-+static inline void cpu_register_vulnerabilities(void) { }
-+#endif
-+
- void __init cpu_dev_init(void)
- {
- if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
- panic("Failed to register CPU subsystem");
-
- cpu_dev_register_generic();
-+ cpu_register_vulnerabilities();
- }
-diff --git a/include/linux/cpu.h b/include/linux/cpu.h
-index 938ea8ae0ba4..c816e6f2730c 100644
---- a/include/linux/cpu.h
-+++ b/include/linux/cpu.h
-@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
- extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
- extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
-
-+extern ssize_t cpu_show_meltdown(struct device *dev,
-+ struct device_attribute *attr, char *buf);
-+extern ssize_t cpu_show_spectre_v1(struct device *dev,
-+ struct device_attribute *attr, char *buf);
-+extern ssize_t cpu_show_spectre_v2(struct device *dev,
-+ struct device_attribute *attr, char *buf);
-+
- extern __printf(4, 5)
- struct device *cpu_device_create(struct device *parent, void *drvdata,
- const struct attribute_group **groups,
---
-2.14.3
-
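
The sysfs interface added above is consumed by simply reading the files. A short sketch (the paths are exactly those the patch creates; the reporting loop itself is illustrative):

    #include <stdio.h>

    int main(void)
    {
        const char *base = "/sys/devices/system/cpu/vulnerabilities/";
        const char *names[] = { "meltdown", "spectre_v1", "spectre_v2" };
        char path[128], line[256];

        for (int i = 0; i < 3; i++) {
            snprintf(path, sizeof(path), "%s%s", base, names[i]);
            FILE *f = fopen(path, "r");
            if (!f)            /* kernel predates the folder, or file absent */
                continue;
            if (fgets(line, sizeof(line), f))
                printf("%-10s: %s", names[i], line);   /* line keeps its \n */
            fclose(f);
        }
        return 0;
    }
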
diff --git a/freed-ora/current/f27/0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch b/freed-ora/current/f27/0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch
deleted file mode 100644
index 8676c732f..000000000
--- a/freed-ora/current/f27/0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From 9c6a73c75864ad9fa49e5fa6513e4c4071c0e29f Mon Sep 17 00:00:00 2001
-From: Tom Lendacky <thomas.lendacky@amd.com>
-Date: Mon, 8 Jan 2018 16:09:32 -0600
-Subject: [PATCH 2/2] x86/cpu/AMD: Use LFENCE_RDTSC in preference to
- MFENCE_RDTSC
-
-With LFENCE now a serializing instruction, use LFENCE_RDTSC in preference
-to MFENCE_RDTSC. However, since the kernel could be running under a
-hypervisor that does not support writing that MSR, read the MSR back and
-verify that the bit has been set successfully. If the MSR can be read
-and the bit is set, then set the LFENCE_RDTSC feature, otherwise set the
-MFENCE_RDTSC feature.
-
-Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Dan Williams <dan.j.williams@intel.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: David Woodhouse <dwmw@amazon.co.uk>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/20180108220932.12580.52458.stgit@tlendack-t1.amdoffice.net
----
- arch/x86/include/asm/msr-index.h | 1 +
- arch/x86/kernel/cpu/amd.c | 18 ++++++++++++++++--
- 2 files changed, 17 insertions(+), 2 deletions(-)
-
-diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
-index 1e7d710fef43..fa11fb1fa570 100644
---- a/arch/x86/include/asm/msr-index.h
-+++ b/arch/x86/include/asm/msr-index.h
-@@ -354,6 +354,7 @@
- #define MSR_FAM10H_NODE_ID 0xc001100c
- #define MSR_F10H_DECFG 0xc0011029
- #define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
-+#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
-
- /* K8 MSRs */
- #define MSR_K8_TOP_MEM1 0xc001001a
-diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index 5b438d81beb2..ea831c858195 100644
---- a/arch/x86/kernel/cpu/amd.c
-+++ b/arch/x86/kernel/cpu/amd.c
-@@ -829,6 +829,9 @@ static void init_amd(struct cpuinfo_x86 *c)
- set_cpu_cap(c, X86_FEATURE_K8);
-
- if (cpu_has(c, X86_FEATURE_XMM2)) {
-+ unsigned long long val;
-+ int ret;
-+
- /*
- * A serializing LFENCE has less overhead than MFENCE, so
- * use it for execution serialization. On families which
-@@ -839,8 +842,19 @@ static void init_amd(struct cpuinfo_x86 *c)
- msr_set_bit(MSR_F10H_DECFG,
- MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
-
-- /* MFENCE stops RDTSC speculation */
-- set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
-+ /*
-+ * Verify that the MSR write was successful (could be running
-+ * under a hypervisor) and only then assume that LFENCE is
-+ * serializing.
-+ */
-+ ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
-+ if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
-+ /* A serializing LFENCE stops RDTSC speculation */
-+ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-+ } else {
-+ /* MFENCE stops RDTSC speculation */
-+ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
-+ }
- }
-
- /*
---
-2.14.3
-
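
The decision logic above generalizes: when a hypervisor may silently swallow an MSR write, feature selection must key off the value read back, not off the write having been issued. A standalone model of that flow, with a toggle standing in for a write-filtering hypervisor (all names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define LFENCE_SERIALIZE (1ULL << 1)

    static uint64_t msr_decfg;          /* stand-in for MSR 0xc0011029 */
    static int hv_drops_writes = 1;     /* toggle: strict hypervisor */

    static void msr_set_bit(uint64_t bit)
    {
        if (!hv_drops_writes)           /* write may be silently ignored */
            msr_decfg |= bit;
    }

    static int rdmsr_safe(uint64_t *val)
    {
        *val = msr_decfg;
        return 0;                       /* 0 == read succeeded */
    }

    int main(void)
    {
        uint64_t val;

        msr_set_bit(LFENCE_SERIALIZE);
        /* Only trust the bit if the read-back confirms it stuck. */
        if (!rdmsr_safe(&val) && (val & LFENCE_SERIALIZE))
            puts("LFENCE_RDTSC: use LFENCE before RDTSC");
        else
            puts("MFENCE_RDTSC: fall back to MFENCE");
        return 0;
    }
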
diff --git a/freed-ora/current/f27/cgroup-for-4.15-fixes-cgroup-fix-css_task_iter-crash-on-CSS_TASK_ITER_PROC.patch b/freed-ora/current/f27/cgroup-for-4.15-fixes-cgroup-fix-css_task_iter-crash-on-CSS_TASK_ITER_PROC.patch
deleted file mode 100644
index fc84559d0..000000000
--- a/freed-ora/current/f27/cgroup-for-4.15-fixes-cgroup-fix-css_task_iter-crash-on-CSS_TASK_ITER_PROC.patch
+++ /dev/null
@@ -1,132 +0,0 @@
-From patchwork Wed Dec 20 15:13:31 2017
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Subject: [cgroup/for-4.15-fixes] cgroup: fix css_task_iter crash on
- CSS_TASK_ITER_PROC
-From: Tejun Heo <tj@kernel.org>
-X-Patchwork-Id: 10125801
-Message-Id: <20171220151331.GA3413940@devbig577.frc2.facebook.com>
-To: Laura Abbott <labbott@redhat.com>
-Cc: Zefan Li <lizefan@huawei.com>, linux-kernel@vger.kernel.org,
- cgroups@vger.kernel.org, regressions@leemhuis.info,
- Bronek Kozicki <brok@incorrekt.com>, George Amanakis <gamanakis@gmail.com>
-Date: Wed, 20 Dec 2017 07:13:31 -0800
-
-Hello,
-
-Applied the following to cgroup/for-4.15-fixes. Will push out to
-linus later this week. I could reproduce the problem reliably and am
-pretty sure this is the right fix but I'd greatly appreciate if you
-guys can confirm the fix too.
-
-Thank you very much.
-
------- 8< ------
->From 74d0833c659a8a54735e5efdd44f4b225af68586 Mon Sep 17 00:00:00 2001
-From: Tejun Heo <tj@kernel.org>
-Date: Wed, 20 Dec 2017 07:09:19 -0800
-
-While teaching css_task_iter to handle skipping over tasks which
-aren't group leaders, bc2fb7ed089f ("cgroup: add @flags to
-css_task_iter_start() and implement CSS_TASK_ITER_PROCS") introduced a
-silly bug.
-
-CSS_TASK_ITER_PROCS is implemented by repeating
-css_task_iter_advance() while the advanced cursor is pointing to a
-non-leader thread. However, the cursor variable, @l, wasn't updated
-when the iteration has to advance to the next css_set and the
-following repetition would operate on the terminal @l from the
-previous iteration which isn't pointing to a valid task leading to
-oopses like the following or infinite looping.
-
- BUG: unable to handle kernel NULL pointer dereference at 0000000000000254
- IP: __task_pid_nr_ns+0xc7/0xf0
- PGD 0 P4D 0
- Oops: 0000 [#1] SMP
- ...
- CPU: 2 PID: 1 Comm: systemd Not tainted 4.14.4-200.fc26.x86_64 #1
- Hardware name: System manufacturer System Product Name/PRIME B350M-A, BIOS 3203 11/09/2017
- task: ffff88c4baee8000 task.stack: ffff96d5c3158000
- RIP: 0010:__task_pid_nr_ns+0xc7/0xf0
- RSP: 0018:ffff96d5c315bd50 EFLAGS: 00010206
- RAX: 0000000000000000 RBX: ffff88c4b68c6000 RCX: 0000000000000250
- RDX: ffffffffa5e47960 RSI: 0000000000000000 RDI: ffff88c490f6ab00
- RBP: ffff96d5c315bd50 R08: 0000000000001000 R09: 0000000000000005
- R10: ffff88c4be006b80 R11: ffff88c42f1b8004 R12: ffff96d5c315bf18
- R13: ffff88c42d7dd200 R14: ffff88c490f6a510 R15: ffff88c4b68c6000
- FS: 00007f9446f8ea00(0000) GS:ffff88c4be680000(0000) knlGS:0000000000000000
- CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
- CR2: 0000000000000254 CR3: 00000007f956f000 CR4: 00000000003406e0
- Call Trace:
- cgroup_procs_show+0x19/0x30
- cgroup_seqfile_show+0x4c/0xb0
- kernfs_seq_show+0x21/0x30
- seq_read+0x2ec/0x3f0
- kernfs_fop_read+0x134/0x180
- __vfs_read+0x37/0x160
- ? security_file_permission+0x9b/0xc0
- vfs_read+0x8e/0x130
- SyS_read+0x55/0xc0
- entry_SYSCALL_64_fastpath+0x1a/0xa5
- RIP: 0033:0x7f94455f942d
- RSP: 002b:00007ffe81ba2d00 EFLAGS: 00000293 ORIG_RAX: 0000000000000000
- RAX: ffffffffffffffda RBX: 00005574e2233f00 RCX: 00007f94455f942d
- RDX: 0000000000001000 RSI: 00005574e2321a90 RDI: 000000000000002b
- RBP: 0000000000000000 R08: 00005574e2321a90 R09: 00005574e231de60
- R10: 00007f94458c8b38 R11: 0000000000000293 R12: 00007f94458c8ae0
- R13: 00007ffe81ba3800 R14: 0000000000000000 R15: 00005574e2116560
- Code: 04 74 0e 89 f6 48 8d 04 76 48 8d 04 c5 f0 05 00 00 48 8b bf b8 05 00 00 48 01 c7 31 c0 48 8b 0f 48 85 c9 74 18 8b b2 30 08 00 00 <3b> 71 04 77 0d 48 c1 e6 05 48 01 f1 48 3b 51 38 74 09 5d c3 8b
- RIP: __task_pid_nr_ns+0xc7/0xf0 RSP: ffff96d5c315bd50
-
-Fix it by moving the initialization of the cursor below the repeat
-label. While at it, rename it to @next for readability.
-
-Signed-off-by: Tejun Heo <tj@kernel.org>
-Fixes: bc2fb7ed089f ("cgroup: add @flags to css_task_iter_start() and implement CSS_TASK_ITER_PROCS")
-Cc: stable@vger.kernel.org # v4.14+
-Reported-by: Laura Abbott <labbott@redhat.com>
-Reported-by: Bronek Kozicki <brok@incorrekt.com>
-Reported-by: George Amanakis <gamanakis@gmail.com>
-Signed-off-by: Tejun Heo <tj@kernel.org>
----
- kernel/cgroup/cgroup.c | 14 ++++++--------
- 1 file changed, 6 insertions(+), 8 deletions(-)
-
-diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
-index f4c2f8c..2cf06c2 100644
---- a/kernel/cgroup/cgroup.c
-+++ b/kernel/cgroup/cgroup.c
-@@ -4125,26 +4125,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
-
- static void css_task_iter_advance(struct css_task_iter *it)
- {
-- struct list_head *l = it->task_pos;
-+ struct list_head *next;
-
- lockdep_assert_held(&css_set_lock);
-- WARN_ON_ONCE(!l);
--
- repeat:
- /*
- * Advance iterator to find next entry. cset->tasks is consumed
- * first and then ->mg_tasks. After ->mg_tasks, we move onto the
- * next cset.
- */
-- l = l->next;
-+ next = it->task_pos->next;
-
-- if (l == it->tasks_head)
-- l = it->mg_tasks_head->next;
-+ if (next == it->tasks_head)
-+ next = it->mg_tasks_head->next;
-
-- if (l == it->mg_tasks_head)
-+ if (next == it->mg_tasks_head)
- css_task_iter_advance_css_set(it);
- else
-- it->task_pos = l;
-+ it->task_pos = next;
-
- /* if PROCS, skip over tasks which aren't group leaders */
- if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
diff --git a/freed-ora/current/f27/e1000e-Fix-e1000_check_for_copper_link_ich8lan-return-value..patch b/freed-ora/current/f27/e1000e-Fix-e1000_check_for_copper_link_ich8lan-return-value..patch
deleted file mode 100644
index a31d5d2c5..000000000
--- a/freed-ora/current/f27/e1000e-Fix-e1000_check_for_copper_link_ich8lan-return-value..patch
+++ /dev/null
@@ -1,70 +0,0 @@
-From patchwork Mon Dec 11 07:26:40 2017
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Subject: e1000e: Fix e1000_check_for_copper_link_ich8lan return value.
-From: Benjamin Poirier <bpoirier@suse.com>
-X-Patchwork-Id: 10104349
-Message-Id: <20171211072640.7935-1-bpoirier@suse.com>
-To: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>,
- Christian Hesse <list@eworm.de>, Gabriel C <nix.or.die@gmail.com>,
- intel-wired-lan@lists.osuosl.org, netdev@vger.kernel.org,
- linux-kernel@vger.kernel.org, stable@vger.kernel.org
-Date: Mon, 11 Dec 2017 16:26:40 +0900
-
-e1000e_check_for_copper_link() and e1000_check_for_copper_link_ich8lan()
-are the two functions that may be assigned to mac.ops.check_for_link when
-phy.media_type == e1000_media_type_copper. Commit 19110cfbb34d ("e1000e:
-Separate signaling for link check/link up") changed the meaning of the
-return value of check_for_link for copper media but only adjusted the first
-function. This patch adjusts the second function likewise.
-
-Reported-by: Christian Hesse <list@eworm.de>
-Reported-by: Gabriel C <nix.or.die@gmail.com>
-Link: https://bugzilla.kernel.org/show_bug.cgi?id=198047
-Fixes: 19110cfbb34d ("e1000e: Separate signaling for link check/link up")
-Tested-by: Christian Hesse <list@eworm.de>
-Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
----
- drivers/net/ethernet/intel/e1000e/ich8lan.c | 11 ++++++++---
- 1 file changed, 8 insertions(+), 3 deletions(-)
-
-diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
-index d6d4ed7acf03..31277d3bb7dc 100644
---- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
-+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
-@@ -1367,6 +1367,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
- * Checks to see of the link status of the hardware has changed. If a
- * change in link status has been detected, then we read the PHY registers
- * to get the current speed/duplex if link exists.
-+ *
-+ * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
-+ * up).
- **/
- static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
- {
-@@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
- * Change or Rx Sequence Error interrupt.
- */
- if (!mac->get_link_status)
-- return 0;
-+ return 1;
-
- /* First we want to see if the MII Status Register reports
- * link. If so, then we want to get the current speed/duplex
-@@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
- * different link partner.
- */
- ret_val = e1000e_config_fc_after_link_up(hw);
-- if (ret_val)
-+ if (ret_val) {
- e_dbg("Error configuring flow control\n");
-+ return ret_val;
-+ }
-
-- return ret_val;
-+ return 1;
- }
-
- static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
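
A standalone model of the return contract the new comment documents, negative for -E1000_ERR_*, 0 for link down, 1 for link up, showing why the early-exit path must now return 1 rather than 0 (names mirror the patch for readability; the harness is illustrative):

    #include <stdio.h>

    #define E1000_ERR_PHY 2

    static int get_link_status;     /* 0: link already up, nothing to check */

    static int check_for_copper_link(void)
    {
        if (!get_link_status)
            return 1;               /* the fix: "link up", not "success" */
        /* ... read PHY registers, configure flow control ... */
        return -E1000_ERR_PHY;      /* or 0 (link down) / 1 (link up) */
    }

    int main(void)
    {
        int ret = check_for_copper_link();

        if (ret < 0)
            printf("error %d\n", -ret);
        else
            printf("link %s\n", ret ? "up" : "down");
        return 0;
    }
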
diff --git a/freed-ora/current/f27/kernel.spec b/freed-ora/current/f27/kernel.spec
index 44a7939bc..bf17463f0 100644
--- a/freed-ora/current/f27/kernel.spec
+++ b/freed-ora/current/f27/kernel.spec
@@ -92,7 +92,7 @@ Summary: The Linux kernel
%if 0%{?released_kernel}
# Do we have a -stable update to apply?
-%define stable_update 13
+%define stable_update 14
# Set rpm version accordingly
%if 0%{?stable_update}
%define stablerev %{stable_update}
@@ -685,17 +685,12 @@ Patch504: netfilter-xt_osf-Add-missing-permission-checks.patch
# rhbz 1525768 1525769
Patch505: netfilter-nfnetlink_cthelper-Add-missing-permission-.patch
-# rhbz 1525523
-# https://patchwork.kernel.org/patch/10104349/
-Patch506: e1000e-Fix-e1000_check_for_copper_link_ich8lan-return-value..patch
+# CVE-2018-5344 rhbz 1533909 1533911
+Patch507: loop-fix-concurrent-lo_open-lo_release.patch
# 550-600 Meltdown and Spectre Fixes
Patch550: prevent-bounds-check-bypass-via-speculative-execution.patch
-Patch551: 0001-x86-cpufeatures-Add-X86_BUG_SPECTRE_V-12.patch
-Patch552: 0002-sysfs-cpu-Add-vulnerability-folder.patch
-Patch553: 0001-x86-cpu-AMD-Make-LFENCE-a-serializing-instruction.patch
-Patch554: 0002-x86-cpu-AMD-Use-LFENCE_RDTSC-in-preference-to-MFENCE.patch
-Patch555: retpoline.patch
+Patch551: ppc-mitigations.patch
# 600 - Patches for improved Bay and Cherry Trail device support
# Below patches are submitted upstream, awaiting review / merging
@@ -721,11 +716,6 @@ Patch627: qxl-fixes.patch
# rhbz 1462175
Patch628: HID-rmi-Check-that-a-device-is-a-RMI-device-before-c.patch
-# CVE-2017-17741 rhbz 1527112 1527113
-Patch630: v4-KVM-Fix-stack-out-of-bounds-read-in-write_mmio.patch
-
-Patch631: cgroup-for-4.15-fixes-cgroup-fix-css_task_iter-crash-on-CSS_TASK_ITER_PROC.patch
-
# rhbz1514969
Patch633: 0001-platform-x86-dell-laptop-Filter-out-spurious-keyboar.patch
@@ -2357,6 +2347,21 @@ fi
#
#
%changelog
+* Fri Jan 19 2018 Alexandre Oliva <lxoliva@fsfla.org> -libre
+- GNU Linux-libre 4.14.14-gnu.
+
+* Thu Jan 18 2018 Justin M. Forbes <jforbes@fedoraproject.org> - 4.14.14-300
+- Add some ppc mitigations from upstream
+
+* Wed Jan 17 2018 Justin M. Forbes <jforbes@fedoraproject.org>
+- Linux v4.14.14
+- Fixes (rhbz 1532458)
+
+* Fri Jan 12 2018 Jeremy Cline <jeremy@jcline.org>
+- Fix for CVE-2018-5344 (rhbz 1533909 1533911)
+- Fix for CVE-2018-5332 (rhbz 1533890 1533895)
+- Fix for CVE-2018-5333 (rhbz 1533891 1533895)
+
* Thu Jan 11 2018 Alexandre Oliva <lxoliva@fsfla.org> -libre
- GNU Linux-libre 4.14.13-gnu.
diff --git a/freed-ora/current/f27/loop-fix-concurrent-lo_open-lo_release.patch b/freed-ora/current/f27/loop-fix-concurrent-lo_open-lo_release.patch
new file mode 100644
index 000000000..37131a702
--- /dev/null
+++ b/freed-ora/current/f27/loop-fix-concurrent-lo_open-lo_release.patch
@@ -0,0 +1,55 @@
+From ae6650163c66a7eff1acd6eb8b0f752dcfa8eba5 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 5 Jan 2018 16:26:00 -0800
+Subject: [PATCH] loop: fix concurrent lo_open/lo_release
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+范龙飞 reports that KASAN can report a use-after-free in __lock_acquire.
+The reason is due to insufficient serialization in lo_release(), which
+will continue to use the loop device even after it has decremented the
+lo_refcnt to zero.
+
+In the meantime, another process can come in, open the loop device
+again as it is being shut down. Confusion ensues.
+
+Reported-by: 范龙飞 <long7573@126.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ drivers/block/loop.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index bc8e61506968..d5fe720cf149 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1581,9 +1581,8 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
+ return err;
+ }
+
+-static void lo_release(struct gendisk *disk, fmode_t mode)
++static void __lo_release(struct loop_device *lo)
+ {
+- struct loop_device *lo = disk->private_data;
+ int err;
+
+ if (atomic_dec_return(&lo->lo_refcnt))
+@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
+ mutex_unlock(&lo->lo_ctl_mutex);
+ }
+
++static void lo_release(struct gendisk *disk, fmode_t mode)
++{
++ mutex_lock(&loop_index_mutex);
++ __lo_release(disk->private_data);
++ mutex_unlock(&loop_index_mutex);
++}
++
+ static const struct block_device_operations lo_fops = {
+ .owner = THIS_MODULE,
+ .open = lo_open,
+--
+2.15.1
+
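
The shape of the fix above is worth distilling: once the refcount drops to zero, teardown must run under the same lock that open() takes, or a concurrent opener can observe a half-destroyed device. A standalone pthread sketch of that discipline (hypothetical device type; index_lock plays the role of loop_index_mutex):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;

    struct dev { atomic_int refcnt; int live; };

    static void dev_open(struct dev *d)
    {
        pthread_mutex_lock(&index_lock);
        if (d->live)                   /* opener never sees teardown */
            atomic_fetch_add(&d->refcnt, 1);
        pthread_mutex_unlock(&index_lock);
    }

    static void __dev_release(struct dev *d)
    {
        if (atomic_fetch_sub(&d->refcnt, 1) > 1)
            return;                    /* other users remain */
        d->live = 0;                   /* teardown of the device */
    }

    static void dev_release(struct dev *d)
    {
        /* the fix: hold the index lock across the whole release path */
        pthread_mutex_lock(&index_lock);
        __dev_release(d);
        pthread_mutex_unlock(&index_lock);
    }

    int main(void)
    {
        struct dev d = { 1, 1 };       /* one user, device live */

        dev_open(&d);                  /* refcnt 2 */
        dev_release(&d);               /* refcnt 1, still live */
        dev_release(&d);               /* refcnt 0, torn down */
        printf("live=%d\n", d.live);
        return 0;
    }
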
diff --git a/freed-ora/current/f27/patch-4.14-gnu-4.14.13-gnu.xz.sign b/freed-ora/current/f27/patch-4.14-gnu-4.14.13-gnu.xz.sign
deleted file mode 100644
index 7cb95d688..000000000
--- a/freed-ora/current/f27/patch-4.14-gnu-4.14.13-gnu.xz.sign
+++ /dev/null
@@ -1,6 +0,0 @@
------BEGIN PGP SIGNATURE-----
-
-iF0EABECAB0WIQRHRALIxYLa++OJxCe8t8+Hfn1HpwUCWlZLoQAKCRC8t8+Hfn1H
-p/qlAJ9XbAmVDjiZ4U/Rc0cGjusIbI4L3QCdHwliDTnrvD2Ye48B3VNAAAni8q0=
-=klyt
------END PGP SIGNATURE-----
diff --git a/freed-ora/current/f27/patch-4.14-gnu-4.14.14-gnu.xz.sign b/freed-ora/current/f27/patch-4.14-gnu-4.14.14-gnu.xz.sign
new file mode 100644
index 000000000..9a07f5219
--- /dev/null
+++ b/freed-ora/current/f27/patch-4.14-gnu-4.14.14-gnu.xz.sign
@@ -0,0 +1,6 @@
+-----BEGIN PGP SIGNATURE-----
+
+iF0EABECAB0WIQRHRALIxYLa++OJxCe8t8+Hfn1HpwUCWmA4RQAKCRC8t8+Hfn1H
+p4HvAJ0TuS/VMp/c9ewcEsMrjxn4Dvqi/gCdH5uBaXpNXoWOdbzyhc0F9TA7TBs=
+=QR/k
+-----END PGP SIGNATURE-----
diff --git a/freed-ora/current/f27/ppc-mitigations.patch b/freed-ora/current/f27/ppc-mitigations.patch
new file mode 100644
index 000000000..909485721
--- /dev/null
+++ b/freed-ora/current/f27/ppc-mitigations.patch
@@ -0,0 +1,1309 @@
+From 191eccb1580939fb0d47deb405b82a85b0379070 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Tue, 9 Jan 2018 03:52:05 +1100
+Subject: powerpc/pseries: Add H_GET_CPU_CHARACTERISTICS flags & wrapper
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 191eccb1580939fb0d47deb405b82a85b0379070 upstream.
+
+A new hypervisor call has been defined to communicate various
+characteristics of the CPU to guests. Add definitions for the hcall
+number, flags and a wrapper function.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/hvcall.h | 17 +++++++++++++++++
+ arch/powerpc/include/asm/plpar_wrappers.h | 14 ++++++++++++++
+ 2 files changed, 31 insertions(+)
+
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -241,6 +241,7 @@
+ #define H_GET_HCA_INFO 0x1B8
+ #define H_GET_PERF_COUNT 0x1BC
+ #define H_MANAGE_TRACE 0x1C0
++#define H_GET_CPU_CHARACTERISTICS 0x1C8
+ #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
+ #define H_QUERY_INT_STATE 0x1E4
+ #define H_POLL_PENDING 0x1D8
+@@ -330,6 +331,17 @@
+ #define H_SIGNAL_SYS_RESET_ALL_OTHERS -2
+ /* >= 0 values are CPU number */
+
++/* H_GET_CPU_CHARACTERISTICS return values */
++#define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0
++#define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1
++#define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2
++#define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3
++#define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4
++
++#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
++#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
++#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
++
+ /* Flag values used in H_REGISTER_PROC_TBL hcall */
+ #define PROC_TABLE_OP_MASK 0x18
+ #define PROC_TABLE_DEREG 0x10
+@@ -436,6 +448,11 @@ static inline unsigned int get_longbusy_
+ }
+ }
+
++struct h_cpu_char_result {
++ u64 character;
++ u64 behaviour;
++};
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_HVCALL_H */
+--- a/arch/powerpc/include/asm/plpar_wrappers.h
++++ b/arch/powerpc/include/asm/plpar_wrappers.h
+@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_rese
+ return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
+ }
+
++static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
++{
++ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
++ long rc;
++
++ rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
++ if (rc == H_SUCCESS) {
++ p->character = retbuf[0];
++ p->behaviour = retbuf[1];
++ }
++
++ return rc;
++}
++
+ #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
+From 50e51c13b3822d14ff6df4279423e4b7b2269bc3 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Wed, 10 Jan 2018 03:07:15 +1100
+Subject: powerpc/64: Add macros for annotating the destination of rfid/hrfid
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 50e51c13b3822d14ff6df4279423e4b7b2269bc3 upstream.
+
+The rfid/hrfid ((Hypervisor) Return From Interrupt) instruction is
+used for switching from the kernel to userspace, and from the
+hypervisor to the guest kernel. However it can and is also used for
+other transitions, eg. from real mode kernel code to virtual mode
+kernel code, and it's not always clear from the code what the
+destination context is.
+
+To make it clearer when reading the code, add macros which encode the
+expected destination context.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/exception-64e.h | 6 ++++++
+ arch/powerpc/include/asm/exception-64s.h | 29 +++++++++++++++++++++++++++++
+ 2 files changed, 35 insertions(+)
+
+--- a/arch/powerpc/include/asm/exception-64e.h
++++ b/arch/powerpc/include/asm/exception-64e.h
+@@ -209,5 +209,11 @@ exc_##label##_book3e:
+ ori r3,r3,vector_offset@l; \
+ mtspr SPRN_IVOR##vector_number,r3;
+
++#define RFI_TO_KERNEL \
++ rfi
++
++#define RFI_TO_USER \
++ rfi
++
+ #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
+
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -69,6 +69,35 @@
+ */
+ #define EX_R3 EX_DAR
+
++/* Macros for annotating the expected destination of (h)rfid */
++
++#define RFI_TO_KERNEL \
++ rfid
++
++#define RFI_TO_USER \
++ rfid
++
++#define RFI_TO_USER_OR_KERNEL \
++ rfid
++
++#define RFI_TO_GUEST \
++ rfid
++
++#define HRFI_TO_KERNEL \
++ hrfid
++
++#define HRFI_TO_USER \
++ hrfid
++
++#define HRFI_TO_USER_OR_KERNEL \
++ hrfid
++
++#define HRFI_TO_GUEST \
++ hrfid
++
++#define HRFI_TO_UNKNOWN \
++ hrfid
++
+ #ifdef CONFIG_RELOCATABLE
+ #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+ mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
+From 222f20f140623ef6033491d0103ee0875fe87d35 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Wed, 10 Jan 2018 03:07:15 +1100
+Subject: powerpc/64s: Simple RFI macro conversions
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit 222f20f140623ef6033491d0103ee0875fe87d35 upstream.
+
+This commit does simple conversions of rfi/rfid to the new macros that
+include the expected destination context. By simple we mean cases
+where there is a single well known destination context, and it's
+simply a matter of substituting the instruction for the appropriate
+macro.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ arch/powerpc/include/asm/exception-64s.h | 4 ++--
+ arch/powerpc/kernel/entry_64.S | 14 +++++++++-----
+ arch/powerpc/kernel/exceptions-64s.S | 22 +++++++++++-----------
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 7 +++----
+ arch/powerpc/kvm/book3s_rmhandlers.S | 7 +++++--
+ arch/powerpc/kvm/book3s_segment.S | 4 ++--
+ 6 files changed, 32 insertions(+), 26 deletions(-)
+
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -242,7 +242,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+ mtspr SPRN_##h##SRR0,r12; \
+ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
+ mtspr SPRN_##h##SRR1,r10; \
+- h##rfid; \
++ h##RFI_TO_KERNEL; \
+ b . /* prevent speculative execution */
+ #define EXCEPTION_PROLOG_PSERIES_1(label, h) \
+ __EXCEPTION_PROLOG_PSERIES_1(label, h)
+@@ -256,7 +256,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+ mtspr SPRN_##h##SRR0,r12; \
+ mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
+ mtspr SPRN_##h##SRR1,r10; \
+- h##rfid; \
++ h##RFI_TO_KERNEL; \
+ b . /* prevent speculative execution */
+
+ #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -37,6 +37,11 @@
+ #include <asm/tm.h>
+ #include <asm/ppc-opcode.h>
+ #include <asm/export.h>
++#ifdef CONFIG_PPC_BOOK3S
++#include <asm/exception-64s.h>
++#else
++#include <asm/exception-64e.h>
++#endif
+
+ /*
+ * System calls.
+@@ -397,8 +402,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ mtmsrd r10, 1
+ mtspr SPRN_SRR0, r11
+ mtspr SPRN_SRR1, r12
+-
+- rfid
++ RFI_TO_USER
+ b . /* prevent speculative execution */
+ #endif
+ _ASM_NOKPROBE_SYMBOL(system_call_common);
+@@ -1073,7 +1077,7 @@ __enter_rtas:
+
+ mtspr SPRN_SRR0,r5
+ mtspr SPRN_SRR1,r6
+- rfid
++ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+
+ rtas_return_loc:
+@@ -1098,7 +1102,7 @@ rtas_return_loc:
+
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
+- rfid
++ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+ _ASM_NOKPROBE_SYMBOL(__enter_rtas)
+ _ASM_NOKPROBE_SYMBOL(rtas_return_loc)
+@@ -1171,7 +1175,7 @@ _GLOBAL(enter_prom)
+ LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+ andc r11,r11,r12
+ mtsrr1 r11
+- rfid
++ RFI_TO_KERNEL
+ #endif /* CONFIG_PPC_BOOK3E */
+
+ 1: /* Return from OF */
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -254,7 +254,7 @@ BEGIN_FTR_SECTION
+ LOAD_HANDLER(r12, machine_check_handle_early)
+ 1: mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r11
+- rfid
++ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+ 2:
+ /* Stack overflow. Stay on emergency stack and panic.
+@@ -443,7 +443,7 @@ EXC_COMMON_BEGIN(machine_check_handle_ea
+ li r3,MSR_ME
+ andc r10,r10,r3 /* Turn off MSR_ME */
+ mtspr SPRN_SRR1,r10
+- rfid
++ RFI_TO_KERNEL
+ b .
+ 2:
+ /*
+@@ -461,7 +461,7 @@ EXC_COMMON_BEGIN(machine_check_handle_ea
+ */
+ bl machine_check_queue_event
+ MACHINE_CHECK_HANDLER_WINDUP
+- rfid
++ RFI_TO_USER_OR_KERNEL
+ 9:
+ /* Deliver the machine check to host kernel in V mode. */
+ MACHINE_CHECK_HANDLER_WINDUP
+@@ -649,7 +649,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_R
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ mtspr SPRN_SRR1,r10
+- rfid
++ RFI_TO_KERNEL
+ b .
+
+ 8: std r3,PACA_EXSLB+EX_DAR(r13)
+@@ -660,7 +660,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_R
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ mtspr SPRN_SRR1,r10
+- rfid
++ RFI_TO_KERNEL
+ b .
+
+ EXC_COMMON_BEGIN(unrecov_slb)
+@@ -905,7 +905,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
+ mtspr SPRN_SRR0,r10 ; \
+ ld r10,PACAKMSR(r13) ; \
+ mtspr SPRN_SRR1,r10 ; \
+- rfid ; \
++ RFI_TO_KERNEL ; \
+ b . ; /* prevent speculative execution */
+
+ #define SYSCALL_FASTENDIAN \
+@@ -914,7 +914,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
+ xori r12,r12,MSR_LE ; \
+ mtspr SPRN_SRR1,r12 ; \
+ mr r13,r9 ; \
+- rfid ; /* return to userspace */ \
++ RFI_TO_USER ; /* return to userspace */ \
+ b . ; /* prevent speculative execution */
+
+ #if defined(CONFIG_RELOCATABLE)
+@@ -1299,7 +1299,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+- HRFID
++ HRFI_TO_UNKNOWN
+ b .
+ #endif
+
+@@ -1403,7 +1403,7 @@ masked_##_H##interrupt: \
+ ld r10,PACA_EXGEN+EX_R10(r13); \
+ ld r11,PACA_EXGEN+EX_R11(r13); \
+ /* returns to kernel where r13 must be set up, so don't restore it */ \
+- ##_H##rfid; \
++ ##_H##RFI_TO_KERNEL; \
+ b .; \
+ MASKED_DEC_HANDLER(_H)
+
+@@ -1426,7 +1426,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
+ addi r13, r13, 4
+ mtspr SPRN_SRR0, r13
+ GET_SCRATCH0(r13)
+- rfid
++ RFI_TO_KERNEL
+ b .
+
+ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
+@@ -1438,7 +1438,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
+ addi r13, r13, 4
+ mtspr SPRN_HSRR0, r13
+ GET_SCRATCH0(r13)
+- hrfid
++ HRFI_TO_KERNEL
+ b .
+ #endif
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -78,7 +78,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
+ mtmsrd r0,1 /* clear RI in MSR */
+ mtsrr0 r5
+ mtsrr1 r6
+- RFI
++ RFI_TO_KERNEL
+
+ kvmppc_call_hv_entry:
+ ld r4, HSTATE_KVM_VCPU(r13)
+@@ -187,7 +187,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+ mtmsrd r6, 1 /* Clear RI in MSR */
+ mtsrr0 r8
+ mtsrr1 r7
+- RFI
++ RFI_TO_KERNEL
+
+ /* Virtual-mode return */
+ .Lvirt_return:
+@@ -1131,8 +1131,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
+ ld r0, VCPU_GPR(R0)(r4)
+ ld r4, VCPU_GPR(R4)(r4)
+-
+- hrfid
++ HRFI_TO_GUEST
+ b .
+
+ secondary_too_late:
+--- a/arch/powerpc/kvm/book3s_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_rmhandlers.S
+@@ -46,6 +46,9 @@
+
+ #define FUNC(name) name
+
++#define RFI_TO_KERNEL RFI
++#define RFI_TO_GUEST RFI
++
+ .macro INTERRUPT_TRAMPOLINE intno
+
+ .global kvmppc_trampoline_\intno
+@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
+ GET_SCRATCH0(r13)
+
+ /* And get back into the code */
+- RFI
++ RFI_TO_KERNEL
+ #endif
+
+ /*
+@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
+ ori r5, r5, MSR_EE
+ mtsrr0 r7
+ mtsrr1 r6
+- RFI
++ RFI_TO_KERNEL
+
+ #include "book3s_segment.S"
+--- a/arch/powerpc/kvm/book3s_segment.S
++++ b/arch/powerpc/kvm/book3s_segment.S
+@@ -156,7 +156,7 @@ no_dcbz32_on:
+ PPC_LL r9, SVCPU_R9(r3)
+ PPC_LL r3, (SVCPU_R3)(r3)
+
+- RFI
++ RFI_TO_GUEST
+ kvmppc_handler_trampoline_enter_end:
+
+
+@@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+ cmpwi r12, BOOK3S_INTERRUPT_DOORBELL
+ beqa BOOK3S_INTERRUPT_DOORBELL
+
+- RFI
++ RFI_TO_KERNEL
+ kvmppc_handler_trampoline_exit_end:
+From b8e90cb7bc04a509e821e82ab6ed7a8ef11ba333 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Wed, 10 Jan 2018 03:07:15 +1100
+Subject: powerpc/64: Convert the syscall exit path to use RFI_TO_USER/KERNEL
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit b8e90cb7bc04a509e821e82ab6ed7a8ef11ba333 upstream.
+
+In the syscall exit path we may be returning to user or kernel
+context. We already have a test for that, because we conditionally
+restore r13. So use that existing test and branch, and bifurcate the
+return based on that.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/entry_64.S | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -267,13 +267,23 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
++ ld r2,GPR2(r1)
++ ld r1,GPR1(r1)
++ mtlr r4
++ mtcr r5
++ mtspr SPRN_SRR0,r7
++ mtspr SPRN_SRR1,r8
++ RFI_TO_USER
++ b . /* prevent speculative execution */
++
++ /* exit to kernel */
+ 1: ld r2,GPR2(r1)
+ ld r1,GPR1(r1)
+ mtlr r4
+ mtcr r5
+ mtspr SPRN_SRR0,r7
+ mtspr SPRN_SRR1,r8
+- RFI
++ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+
+ .Lsyscall_error:
+From a08f828cf47e6c605af21d2cdec68f84e799c318 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Wed, 10 Jan 2018 03:07:15 +1100
+Subject: powerpc/64: Convert fast_exception_return to use RFI_TO_USER/KERNEL
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit a08f828cf47e6c605af21d2cdec68f84e799c318 upstream.
+
+Similar to the syscall return path, in fast_exception_return we may be
+returning to user or kernel context. We already have a test for that,
+because we conditionally restore r13. So use that existing test and
+branch, and bifurcate the return based on that.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/entry_64.S | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -892,7 +892,7 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
+ REST_GPR(13, r1)
+-1:
++
+ mtspr SPRN_SRR1,r3
+
+ ld r2,_CCR(r1)
+@@ -905,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ ld r3,GPR3(r1)
+ ld r4,GPR4(r1)
+ ld r1,GPR1(r1)
++ RFI_TO_USER
++ b . /* prevent speculative execution */
+
+- rfid
++1: mtspr SPRN_SRR1,r3
++
++ ld r2,_CCR(r1)
++ mtcrf 0xFF,r2
++ ld r2,_NIP(r1)
++ mtspr SPRN_SRR0,r2
++
++ ld r0,GPR0(r1)
++ ld r2,GPR2(r1)
++ ld r3,GPR3(r1)
++ ld r4,GPR4(r1)
++ ld r1,GPR1(r1)
++ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+
+ #endif /* CONFIG_PPC_BOOK3E */
+From c7305645eb0c1621351cfc104038831ae87c0053 Mon Sep 17 00:00:00 2001
+From: Nicholas Piggin <npiggin@gmail.com>
+Date: Wed, 10 Jan 2018 03:07:15 +1100
+Subject: powerpc/64s: Convert slb_miss_common to use RFI_TO_USER/KERNEL
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+commit c7305645eb0c1621351cfc104038831ae87c0053 upstream.
+
+In the SLB miss handler we may be returning to user or kernel. We need
+to add a check early on and save the result in the cr4 register, and
+then we bifurcate the return path based on that.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/exceptions-64s.S | 29 ++++++++++++++++++++++++++++-
+ 1 file changed, 28 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -596,6 +596,9 @@ EXC_COMMON_BEGIN(slb_miss_common)
+ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
+ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+
++ andi. r9,r11,MSR_PR // Check for exception from userspace
++ cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later
++
+ /*
+ * Test MSR_RI before calling slb_allocate_realmode, because the
+ * MSR in r11 gets clobbered. However we still want to allocate
+@@ -622,9 +625,32 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_R
+
+ /* All done -- return from exception. */
+
++ bne cr4,1f /* returning to kernel */
++
++.machine push
++.machine "power4"
++ mtcrf 0x80,r9
++ mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
++ mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
++ mtcrf 0x02,r9 /* I/D indication is in cr6 */
++ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
++.machine pop
++
++ RESTORE_CTR(r9, PACA_EXSLB)
++ RESTORE_PPR_PACA(PACA_EXSLB, r9)
++ mr r3,r12
++ ld r9,PACA_EXSLB+EX_R9(r13)
++ ld r10,PACA_EXSLB+EX_R10(r13)
++ ld r11,PACA_EXSLB+EX_R11(r13)
++ ld r12,PACA_EXSLB+EX_R12(r13)
++ ld r13,PACA_EXSLB+EX_R13(r13)
++ RFI_TO_USER
++ b . /* prevent speculative execution */
++1:
+ .machine push
+ .machine "power4"
+ mtcrf 0x80,r9
++ mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
+ mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
+ mtcrf 0x02,r9 /* I/D indication is in cr6 */
+ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
+@@ -638,9 +664,10 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_R
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+- rfid
++ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+
++
+ 2: std r3,PACA_EXSLB+EX_DAR(r13)
+ mr r3,r12
+ mfspr r11,SPRN_SRR0
+From aa8a5e0062ac940f7659394f4817c948dc8c0667 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Wed, 10 Jan 2018 03:07:15 +1100
+Subject: powerpc/64s: Add support for RFI flush of L1-D cache
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit aa8a5e0062ac940f7659394f4817c948dc8c0667 upstream.
+
+On some CPUs we can prevent the Meltdown vulnerability by flushing the
+L1-D cache on exit from kernel to user mode, and from hypervisor to
+guest.
+
+This is known to be the case on at least Power7, Power8 and Power9. At
+this time we do not know the status of the vulnerability on other CPUs
+such as the 970 (Apple G5), pasemi CPUs (AmigaOne X1000) or Freescale
+CPUs. As more information comes to light we can enable this, or other
+mechanisms on those CPUs.
+
+The vulnerability occurs when the load of an architecturally
+inaccessible memory region (eg. userspace load of kernel memory) is
+speculatively executed to the point where its result can influence the
+address of a subsequent speculatively executed load.
+
+In order for that to happen, the first load must hit in the L1,
+because before the load is sent to the L2 the permission check is
+performed. Therefore if no kernel addresses hit in the L1 the
+vulnerability can not occur. We can ensure that is the case by
+flushing the L1 whenever we return to userspace. Similarly for
+hypervisor vs guest.
+
+In order to flush the L1-D cache on exit, we add a section of nops at
+each (h)rfi location that returns to a lower privileged context, and
+patch that with some sequence. Newer firmwares are able to advertise
+to us that there is a special nop instruction that flushes the L1-D.
+If we do not see that advertised, we fall back to doing a displacement
+flush in software.
+
+For guest kernels we support migration between some CPU versions, and
+different CPUs may use different flush instructions. So that we are
+prepared to migrate to a machine with a different flush instruction
+activated, we may have to patch more than one flush instruction at
+boot if the hypervisor tells us to.
+
+In the end this patch is mostly the work of Nicholas Piggin and
+Michael Ellerman. However a cast of thousands contributed to analysis
+of the issue, earlier versions of the patch, back ports testing etc.
+Many thanks to all of them.
+
+Tested-by: Jon Masters <jcm@redhat.com>
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/exception-64s.h | 40 +++++++++++---
+ arch/powerpc/include/asm/feature-fixups.h | 13 ++++
+ arch/powerpc/include/asm/paca.h | 10 +++
+ arch/powerpc/include/asm/setup.h | 13 ++++
+ arch/powerpc/kernel/asm-offsets.c | 5 +
+ arch/powerpc/kernel/exceptions-64s.S | 84 ++++++++++++++++++++++++++++++
+ arch/powerpc/kernel/setup_64.c | 79 ++++++++++++++++++++++++++++
+ arch/powerpc/kernel/vmlinux.lds.S | 9 +++
+ arch/powerpc/lib/feature-fixups.c | 41 ++++++++++++++
+ 9 files changed, 286 insertions(+), 8 deletions(-)
+
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -69,34 +69,58 @@
+ */
+ #define EX_R3 EX_DAR
+
+-/* Macros for annotating the expected destination of (h)rfid */
++/*
++ * Macros for annotating the expected destination of (h)rfid
++ *
++ * The nop instructions allow us to insert one or more instructions to flush the
++ * L1-D cache when returning to userspace or a guest.
++ */
++#define RFI_FLUSH_SLOT \
++ RFI_FLUSH_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop
+
+ #define RFI_TO_KERNEL \
+ rfid
+
+ #define RFI_TO_USER \
+- rfid
++ RFI_FLUSH_SLOT; \
++ rfid; \
++ b rfi_flush_fallback
+
+ #define RFI_TO_USER_OR_KERNEL \
+- rfid
++ RFI_FLUSH_SLOT; \
++ rfid; \
++ b rfi_flush_fallback
+
+ #define RFI_TO_GUEST \
+- rfid
++ RFI_FLUSH_SLOT; \
++ rfid; \
++ b rfi_flush_fallback
+
+ #define HRFI_TO_KERNEL \
+ hrfid
+
+ #define HRFI_TO_USER \
+- hrfid
++ RFI_FLUSH_SLOT; \
++ hrfid; \
++ b hrfi_flush_fallback
+
+ #define HRFI_TO_USER_OR_KERNEL \
+- hrfid
++ RFI_FLUSH_SLOT; \
++ hrfid; \
++ b hrfi_flush_fallback
+
+ #define HRFI_TO_GUEST \
+- hrfid
++ RFI_FLUSH_SLOT; \
++ hrfid; \
++ b hrfi_flush_fallback
+
+ #define HRFI_TO_UNKNOWN \
+- hrfid
++ RFI_FLUSH_SLOT; \
++ hrfid; \
++ b hrfi_flush_fallback
+
+ #ifdef CONFIG_RELOCATABLE
+ #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -187,7 +187,20 @@ label##3: \
+ FTR_ENTRY_OFFSET label##1b-label##3b; \
+ .popsection;
+
++#define RFI_FLUSH_FIXUP_SECTION \
++951: \
++ .pushsection __rfi_flush_fixup,"a"; \
++ .align 2; \
++952: \
++ FTR_ENTRY_OFFSET 951b-952b; \
++ .popsection;
++
++
+ #ifndef __ASSEMBLY__
++#include <linux/types.h>
++
++extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
++
+ void apply_feature_fixups(void);
+ void setup_feature_keys(void);
+ #endif
+--- a/arch/powerpc/include/asm/paca.h
++++ b/arch/powerpc/include/asm/paca.h
+@@ -231,6 +231,16 @@ struct paca_struct {
+ struct sibling_subcore_state *sibling_subcore_state;
+ #endif
+ #endif
++#ifdef CONFIG_PPC_BOOK3S_64
++ /*
++ * rfi fallback flush must be in its own cacheline to prevent
++ * other paca data leaking into the L1d
++ */
++ u64 exrfi[EX_SIZE] __aligned(0x80);
++ void *rfi_flush_fallback_area;
++ u64 l1d_flush_congruence;
++ u64 l1d_flush_sets;
++#endif
+ };
+
+ extern void copy_mm_to_paca(struct mm_struct *mm);
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -39,6 +39,19 @@ static inline void pseries_big_endian_ex
+ static inline void pseries_little_endian_exceptions(void) {}
+ #endif /* CONFIG_PPC_PSERIES */
+
++void rfi_flush_enable(bool enable);
++
++/* These are bit flags */
++enum l1d_flush_type {
++ L1D_FLUSH_NONE = 0x1,
++ L1D_FLUSH_FALLBACK = 0x2,
++ L1D_FLUSH_ORI = 0x4,
++ L1D_FLUSH_MTTRIG = 0x8,
++};
++
++void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
++void do_rfi_flush_fixups(enum l1d_flush_type types);
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* _ASM_POWERPC_SETUP_H */
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -237,6 +237,11 @@ int main(void)
+ OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp);
+ OFFSET(PACA_IN_MCE, paca_struct, in_mce);
+ OFFSET(PACA_IN_NMI, paca_struct, in_nmi);
++ OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area);
++ OFFSET(PACA_EXRFI, paca_struct, exrfi);
++ OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence);
++ OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets);
++
+ #endif
+ OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
+ OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1434,6 +1434,90 @@ masked_##_H##interrupt: \
+ b .; \
+ MASKED_DEC_HANDLER(_H)
+
++TRAMP_REAL_BEGIN(rfi_flush_fallback)
++ SET_SCRATCH0(r13);
++ GET_PACA(r13);
++ std r9,PACA_EXRFI+EX_R9(r13)
++ std r10,PACA_EXRFI+EX_R10(r13)
++ std r11,PACA_EXRFI+EX_R11(r13)
++ std r12,PACA_EXRFI+EX_R12(r13)
++ std r8,PACA_EXRFI+EX_R13(r13)
++ mfctr r9
++ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
++ ld r11,PACA_L1D_FLUSH_SETS(r13)
++ ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
++ /*
++ * The load addresses are at staggered offsets within cachelines,
++ * which suits some pipelines better (on others it should not
++ * hurt).
++ */
++ addi r12,r12,8
++ mtctr r11
++ DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
++
++ /* order ld/st prior to dcbt stop all streams with flushing */
++ sync
++1: li r8,0
++ .rept 8 /* 8-way set associative */
++ ldx r11,r10,r8
++ add r8,r8,r12
++ xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
++ add r8,r8,r11 // Add 0, this creates a dependency on the ldx
++ .endr
++ addi r10,r10,128 /* 128 byte cache line */
++ bdnz 1b
++
++ mtctr r9
++ ld r9,PACA_EXRFI+EX_R9(r13)
++ ld r10,PACA_EXRFI+EX_R10(r13)
++ ld r11,PACA_EXRFI+EX_R11(r13)
++ ld r12,PACA_EXRFI+EX_R12(r13)
++ ld r8,PACA_EXRFI+EX_R13(r13)
++ GET_SCRATCH0(r13);
++ rfid
++
++TRAMP_REAL_BEGIN(hrfi_flush_fallback)
++ SET_SCRATCH0(r13);
++ GET_PACA(r13);
++ std r9,PACA_EXRFI+EX_R9(r13)
++ std r10,PACA_EXRFI+EX_R10(r13)
++ std r11,PACA_EXRFI+EX_R11(r13)
++ std r12,PACA_EXRFI+EX_R12(r13)
++ std r8,PACA_EXRFI+EX_R13(r13)
++ mfctr r9
++ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
++ ld r11,PACA_L1D_FLUSH_SETS(r13)
++ ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13)
++ /*
++	 * The load addresses are at staggered offsets within cachelines,
++ * which suits some pipelines better (on others it should not
++ * hurt).
++ */
++ addi r12,r12,8
++ mtctr r11
++ DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
++
++ /* order ld/st prior to dcbt stop all streams with flushing */
++ sync
++1: li r8,0
++ .rept 8 /* 8-way set associative */
++ ldx r11,r10,r8
++ add r8,r8,r12
++ xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not
++ add r8,r8,r11 // Add 0, this creates a dependency on the ldx
++ .endr
++ addi r10,r10,128 /* 128 byte cache line */
++ bdnz 1b
++
++ mtctr r9
++ ld r9,PACA_EXRFI+EX_R9(r13)
++ ld r10,PACA_EXRFI+EX_R10(r13)
++ ld r11,PACA_EXRFI+EX_R11(r13)
++ ld r12,PACA_EXRFI+EX_R12(r13)
++ ld r8,PACA_EXRFI+EX_R13(r13)
++ GET_SCRATCH0(r13);
++ hrfid
++
+ /*
+ * Real mode exceptions actually use this too, but alternate
+ * instruction code patches (which end up in the common .text area)
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -784,3 +784,82 @@ static int __init disable_hardlockup_det
+ return 0;
+ }
+ early_initcall(disable_hardlockup_detector);
++
++#ifdef CONFIG_PPC_BOOK3S_64
++static enum l1d_flush_type enabled_flush_types;
++static void *l1d_flush_fallback_area;
++bool rfi_flush;
++
++static void do_nothing(void *unused)
++{
++ /*
++ * We don't need to do the flush explicitly, just enter+exit kernel is
++ * sufficient, the RFI exit handlers will do the right thing.
++ */
++}
++
++void rfi_flush_enable(bool enable)
++{
++ if (rfi_flush == enable)
++ return;
++
++ if (enable) {
++ do_rfi_flush_fixups(enabled_flush_types);
++ on_each_cpu(do_nothing, NULL, 1);
++ } else
++ do_rfi_flush_fixups(L1D_FLUSH_NONE);
++
++ rfi_flush = enable;
++}
++
++static void init_fallback_flush(void)
++{
++ u64 l1d_size, limit;
++ int cpu;
++
++ l1d_size = ppc64_caches.l1d.size;
++ limit = min(safe_stack_limit(), ppc64_rma_size);
++
++ /*
++ * Align to L1d size, and size it at 2x L1d size, to catch possible
++ * hardware prefetch runoff. We don't have a recipe for load patterns to
++ * reliably avoid the prefetcher.
++ */
++ l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
++ memset(l1d_flush_fallback_area, 0, l1d_size * 2);
++
++ for_each_possible_cpu(cpu) {
++ /*
++ * The fallback flush is currently coded for 8-way
++ * associativity. Different associativity is possible, but it
++ * will be treated as 8-way and may not evict the lines as
++ * effectively.
++ *
++ * 128 byte lines are mandatory.
++ */
++ u64 c = l1d_size / 8;
++
++ paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
++ paca[cpu].l1d_flush_congruence = c;
++ paca[cpu].l1d_flush_sets = c / 128;
++ }
++}
++
++void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
++{
++ if (types & L1D_FLUSH_FALLBACK) {
++ pr_info("rfi-flush: Using fallback displacement flush\n");
++ init_fallback_flush();
++ }
++
++ if (types & L1D_FLUSH_ORI)
++ pr_info("rfi-flush: Using ori type flush\n");
++
++ if (types & L1D_FLUSH_MTTRIG)
++ pr_info("rfi-flush: Using mttrig type flush\n");
++
++ enabled_flush_types = types;
++
++ rfi_flush_enable(enable);
++}
++#endif /* CONFIG_PPC_BOOK3S_64 */
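As a rough model of what init_fallback_flush() computes above, here is a minimal standalone C sketch, assuming a 32 KiB L1d purely as an example (the real size comes from ppc64_caches at boot). It shows how the hardcoded 8-way/128-byte-line assumptions turn the cache size into the congruence and set counts stored in the paca:

	/* Minimal sketch; 32 KiB L1d assumed as an example only. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long l1d_size = 32 * 1024;      /* example value */
		unsigned long congruence = l1d_size / 8; /* bytes per way (8-way assumed) */
		unsigned long sets = congruence / 128;   /* 128-byte lines per way */

		/* The asm loop above does 8 loads per iteration (one per way)
		 * and runs `sets` iterations, touching each line once. */
		printf("congruence=%lu sets=%lu loads=%lu\n",
		       congruence, sets, sets * 8);
		return 0;
	}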
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -132,6 +132,15 @@ SECTIONS
+ /* Read-only data */
+ RO_DATA(PAGE_SIZE)
+
++#ifdef CONFIG_PPC64
++ . = ALIGN(8);
++ __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
++ __start___rfi_flush_fixup = .;
++ *(__rfi_flush_fixup)
++ __stop___rfi_flush_fixup = .;
++ }
++#endif
++
+ EXCEPTION_TABLE(0)
+
+ NOTES :kernel :notes
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long val
+ }
+ }
+
++#ifdef CONFIG_PPC_BOOK3S_64
++void do_rfi_flush_fixups(enum l1d_flush_type types)
++{
++ unsigned int instrs[3], *dest;
++ long *start, *end;
++ int i;
++
++	start = PTRRELOC(&__start___rfi_flush_fixup);
++ end = PTRRELOC(&__stop___rfi_flush_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++
++ if (types & L1D_FLUSH_FALLBACK)
++ /* b .+16 to fallback flush */
++ instrs[0] = 0x48000010;
++
++ i = 0;
++ if (types & L1D_FLUSH_ORI) {
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
++ }
++
++ if (types & L1D_FLUSH_MTTRIG)
++ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++ patch_instruction(dest + 1, instrs[1]);
++ patch_instruction(dest + 2, instrs[2]);
++ }
++
++ printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
++}
++#endif /* CONFIG_PPC_BOOK3S_64 */
++
+ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ {
+ long *start, *end;
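The constants patched in by do_rfi_flush_fixups() above decode simply: 0x60000000 is the PowerPC nop encoding (ori 0,0,0), and 0x48000010 is an I-form branch with a 16-byte displacement. A short sketch reconstructing the branch word, assuming nothing beyond the architected I-form encoding:

	#include <stdio.h>
	#include <stdint.h>

	/* I-form branch: opcode 18 in the top 6 bits, 24-bit signed
	 * word-aligned displacement, AA = LK = 0. */
	static uint32_t ppc_b(int32_t offset)
	{
		return (18u << 26) | ((uint32_t)offset & 0x03fffffcu);
	}

	int main(void)
	{
		printf("%#010x\n", ppc_b(16)); /* 0x48000010, i.e. "b .+16" */
		return 0;
	}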
+From bc9c9304a45480797e13a8e1df96ffcf44fb62fe Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Wed, 10 Jan 2018 03:07:15 +1100
+Subject: powerpc/64s: Support disabling RFI flush with no_rfi_flush and nopti
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+commit bc9c9304a45480797e13a8e1df96ffcf44fb62fe upstream.
+
+Because there may be some performance overhead of the RFI flush, add
+kernel command line options to disable it.
+
+We add a sensibly named 'no_rfi_flush' option, but we also hijack the
+x86 option 'nopti'. The RFI flush is not the same as KPTI, but if we
+see 'nopti' we can guess that the user is trying to avoid any overhead
+of Meltdown mitigations, and it means we don't have to educate everyone
+about a different command line option.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/setup_64.c | 24 +++++++++++++++++++++++-
+ 1 file changed, 23 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -788,8 +788,29 @@ early_initcall(disable_hardlockup_detect
+ #ifdef CONFIG_PPC_BOOK3S_64
+ static enum l1d_flush_type enabled_flush_types;
+ static void *l1d_flush_fallback_area;
++static bool no_rfi_flush;
+ bool rfi_flush;
+
++static int __init handle_no_rfi_flush(char *p)
++{
++	pr_info("rfi-flush: disabled on command line.\n");
++ no_rfi_flush = true;
++ return 0;
++}
++early_param("no_rfi_flush", handle_no_rfi_flush);
++
++/*
++ * The RFI flush is not KPTI, but because users will see documentation that
++ * says to use nopti, we hijack that option here to also disable the RFI flush.
++ */
++static int __init handle_no_pti(char *p)
++{
++ pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
++ handle_no_rfi_flush(NULL);
++ return 0;
++}
++early_param("nopti", handle_no_pti);
++
+ static void do_nothing(void *unused)
+ {
+ /*
+@@ -860,6 +881,7 @@ void __init setup_rfi_flush(enum l1d_flu
+
+ enabled_flush_types = types;
+
+- rfi_flush_enable(enable);
++ if (!no_rfi_flush)
++ rfi_flush_enable(enable);
+ }
+ #endif /* CONFIG_PPC_BOOK3S_64 */
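In use, either parameter suppresses the flush at boot. The command lines below are illustrative; the log text is taken verbatim from the pr_info() calls above, and 'nopti' prints both lines because handle_no_pti() chains into handle_no_rfi_flush():

	... no_rfi_flush   ->  rfi-flush: disabled on command line.
	... nopti          ->  rfi-flush: disabling due to 'nopti' on command line.
	                       rfi-flush: disabled on command line.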
+From 8989d56878a7735dfdb234707a2fee6faf631085 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Wed, 10 Jan 2018 03:07:15 +1100
+Subject: powerpc/pseries: Query hypervisor for RFI flush settings
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 8989d56878a7735dfdb234707a2fee6faf631085 upstream.
+
+A new hypervisor call is available which tells the guest settings
+related to the RFI flush. Use it to query the appropriate flush
+instruction(s), and whether the flush is required.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/setup.c | 35 +++++++++++++++++++++++++++++++++
+ 1 file changed, 35 insertions(+)
+
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(vo
+ of_pci_check_probe_only();
+ }
+
++static void pseries_setup_rfi_flush(void)
++{
++ struct h_cpu_char_result result;
++ enum l1d_flush_type types;
++ bool enable;
++ long rc;
++
++ /* Enable by default */
++ enable = true;
++
++ rc = plpar_get_cpu_characteristics(&result);
++ if (rc == H_SUCCESS) {
++ types = L1D_FLUSH_NONE;
++
++ if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
++ types |= L1D_FLUSH_MTTRIG;
++ if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
++ types |= L1D_FLUSH_ORI;
++
++ /* Use fallback if nothing set in hcall */
++ if (types == L1D_FLUSH_NONE)
++ types = L1D_FLUSH_FALLBACK;
++
++ if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
++ enable = false;
++ } else {
++		/* Default to fallback in case the hcall is not available */
++ types = L1D_FLUSH_FALLBACK;
++ }
++
++ setup_rfi_flush(types, enable);
++}
++
+ static void __init pSeries_setup_arch(void)
+ {
+ set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(vo
+
+ fwnmi_init();
+
++ pseries_setup_rfi_flush();
++
+ /* By default, only probe PCI (can be overridden by rtas_pci) */
+ pci_add_flags(PCI_PROBE_ONLY);
+
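Because the l1d_flush_type values are bit flags, the hypervisor may advertise several flush instructions at once. A minimal userspace sketch of the selection logic above, with pretend hcall results standing in for plpar_get_cpu_characteristics():

	#include <stdio.h>

	enum l1d_flush_type {
		L1D_FLUSH_NONE     = 0x1,
		L1D_FLUSH_FALLBACK = 0x2,
		L1D_FLUSH_ORI      = 0x4,
		L1D_FLUSH_MTTRIG   = 0x8,
	};

	int main(void)
	{
		int has_trig2 = 1, has_ori30 = 1; /* pretend hcall results */
		enum l1d_flush_type types = L1D_FLUSH_NONE;

		if (has_trig2)
			types |= L1D_FLUSH_MTTRIG;
		if (has_ori30)
			types |= L1D_FLUSH_ORI;
		if (types == L1D_FLUSH_NONE) /* nothing advertised */
			types = L1D_FLUSH_FALLBACK;

		printf("types=%#x\n", (unsigned)types); /* 0xd here */
		return 0;
	}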
+From 6e032b350cd1fdb830f18f8320ef0e13b4e24094 Mon Sep 17 00:00:00 2001
+From: Oliver O'Halloran <oohall@gmail.com>
+Date: Wed, 10 Jan 2018 03:07:15 +1100
+Subject: powerpc/powernv: Check device-tree for RFI flush settings
+
+From: Oliver O'Halloran <oohall@gmail.com>
+
+commit 6e032b350cd1fdb830f18f8320ef0e13b4e24094 upstream.
+
+New device-tree properties are available which tell the hypervisor
+settings related to the RFI flush. Use them to determine the
+appropriate flush instruction to use, and whether the flush is
+required.
+
+Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/setup.c | 49 +++++++++++++++++++++++++++++++++
+ 1 file changed, 49 insertions(+)
+
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -36,13 +36,62 @@
+ #include <asm/opal.h>
+ #include <asm/kexec.h>
+ #include <asm/smp.h>
++#include <asm/setup.h>
+
+ #include "powernv.h"
+
++static void pnv_setup_rfi_flush(void)
++{
++ struct device_node *np, *fw_features;
++ enum l1d_flush_type type;
++ int enable;
++
++ /* Default to fallback in case fw-features are not available */
++ type = L1D_FLUSH_FALLBACK;
++ enable = 1;
++
++ np = of_find_node_by_name(NULL, "ibm,opal");
++ fw_features = of_get_child_by_name(np, "fw-features");
++ of_node_put(np);
++
++ if (fw_features) {
++ np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
++ if (np && of_property_read_bool(np, "enabled"))
++ type = L1D_FLUSH_MTTRIG;
++
++ of_node_put(np);
++
++ np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
++ if (np && of_property_read_bool(np, "enabled"))
++ type = L1D_FLUSH_ORI;
++
++ of_node_put(np);
++
++ /* Enable unless firmware says NOT to */
++ enable = 2;
++ np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
++ if (np && of_property_read_bool(np, "disabled"))
++ enable--;
++
++ of_node_put(np);
++
++ np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
++ if (np && of_property_read_bool(np, "disabled"))
++ enable--;
++
++ of_node_put(np);
++ of_node_put(fw_features);
++ }
++
++ setup_rfi_flush(type, enable > 0);
++}
++
+ static void __init pnv_setup_arch(void)
+ {
+ set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+
++ pnv_setup_rfi_flush();
++
+ /* Initialize SMP */
+ pnv_smp_init();
+
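The enable counting in pnv_setup_rfi_flush() above is terse: the flush is turned off only if both fw-features nodes mark it disabled. A toy restatement, with stand-in booleans for the device-tree lookups:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		bool hv_disabled = false, pr_disabled = true; /* pretend fw-features */
		int enable = 2;

		if (hv_disabled)
			enable--;
		if (pr_disabled)
			enable--;

		/* One "disabled" node alone still leaves the flush on. */
		printf("flush %s\n", enable > 0 ? "enabled" : "disabled");
		return 0;
	}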
diff --git a/freed-ora/current/f27/retpoline.patch b/freed-ora/current/f27/retpoline.patch
deleted file mode 100644
index 88c78fd0e..000000000
--- a/freed-ora/current/f27/retpoline.patch
+++ /dev/null
@@ -1,1480 +0,0 @@
-From 61dc0f555b5c761cdafb0ba5bd41ecf22d68a4c4 Mon Sep 17 00:00:00 2001
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sun, 7 Jan 2018 22:48:01 +0100
-Subject: [PATCH] x86/cpu: Implement CPU vulnerabilities sysfs functions
-
-Implement the CPU vulnerability show functions for meltdown, spectre_v1 and
-spectre_v2.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Will Deacon <will.deacon@arm.com>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Linus Torvalds <torvalds@linuxfoundation.org>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: David Woodhouse <dwmw@amazon.co.uk>
-Link: https://lkml.kernel.org/r/20180107214913.177414879@linutronix.de
----
- arch/x86/Kconfig | 1 +
- arch/x86/kernel/cpu/bugs.c | 29 +++++++++++++++++++++++++++++
- 2 files changed, 30 insertions(+)
-
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index cd5199de231e..e23d21ac745a 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -89,6 +89,7 @@ config X86
- select GENERIC_CLOCKEVENTS_MIN_ADJUST
- select GENERIC_CMOS_UPDATE
- select GENERIC_CPU_AUTOPROBE
-+ select GENERIC_CPU_VULNERABILITIES
- select GENERIC_EARLY_IOREMAP
- select GENERIC_FIND_FIRST_BIT
- select GENERIC_IOMAP
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index ba0b2424c9b0..76ad6cb44b40 100644
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -10,6 +10,7 @@
- */
- #include <linux/init.h>
- #include <linux/utsname.h>
-+#include <linux/cpu.h>
- #include <asm/bugs.h>
- #include <asm/processor.h>
- #include <asm/processor-flags.h>
-@@ -60,3 +61,31 @@ void __init check_bugs(void)
- set_memory_4k((unsigned long)__va(0), 1);
- #endif
- }
-+
-+#ifdef CONFIG_SYSFS
-+ssize_t cpu_show_meltdown(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
-+ return sprintf(buf, "Not affected\n");
-+ if (boot_cpu_has(X86_FEATURE_PTI))
-+ return sprintf(buf, "Mitigation: PTI\n");
-+ return sprintf(buf, "Vulnerable\n");
-+}
-+
-+ssize_t cpu_show_spectre_v1(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
-+ return sprintf(buf, "Not affected\n");
-+ return sprintf(buf, "Vulnerable\n");
-+}
-+
-+ssize_t cpu_show_spectre_v2(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-+ return sprintf(buf, "Not affected\n");
-+ return sprintf(buf, "Vulnerable\n");
-+}
-+#endif
---
-2.14.3
-
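For reference, the files this patch wires up appear under /sys/devices/system/cpu/vulnerabilities/ once GENERIC_CPU_VULNERABILITIES is selected. A small reader sketch; the reported strings come straight from the cpu_show_*() handlers above:

	#include <stdio.h>

	int main(void)
	{
		static const char *files[] = {
			"/sys/devices/system/cpu/vulnerabilities/meltdown",
			"/sys/devices/system/cpu/vulnerabilities/spectre_v1",
			"/sys/devices/system/cpu/vulnerabilities/spectre_v2",
		};
		char line[128];

		for (int i = 0; i < 3; i++) {
			FILE *f = fopen(files[i], "r");

			if (f && fgets(line, sizeof(line), f))
				printf("%s: %s", files[i], line); /* e.g. "Mitigation: PTI" */
			if (f)
				fclose(f);
		}
		return 0;
	}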
-From d46717c610dcfa2cba5c87500c928993371ef1ad Mon Sep 17 00:00:00 2001
-From: David Woodhouse <dwmw@amazon.co.uk>
-Date: Tue, 9 Jan 2018 14:43:07 +0000
-Subject: [PATCH 01/10] x86/retpoline: Add initial retpoline support
-
-Enable the use of -mindirect-branch=thunk-extern in newer GCC, and provide
-the corresponding thunks. Provide assembler macros for invoking the thunks
-in the same way that GCC does, from native and inline assembler.
-
-This adds X86_FEATURE_RETPOLINE and sets it by default on all CPUs. In
-some circumstances, IBRS microcode features may be used instead, and the
-retpoline can be disabled.
-
-On AMD CPUs if lfence is serialising, the retpoline can be dramatically
-simplified to a simple "lfence; jmp *\reg". A future patch, after it has
-been verified that lfence really is serialising in all circumstances, can
-enable this by setting the X86_FEATURE_RETPOLINE_AMD feature bit in addition
-to X86_FEATURE_RETPOLINE.
-
-Do not align the retpoline in the altinstr section, because there is no
-guarantee that it stays aligned when it's copied over the oldinstr during
-alternative patching.
-
-[ Andi Kleen: Rename the macros, add CONFIG_RETPOLINE option, export thunks]
-[ tglx: Put actual function CALL/JMP in front of the macros, convert to
- symbolic labels ]
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Arjan van de Ven <arjan@linux.intel.com>
-Acked-by: Ingo Molnar <mingo@kernel.org>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/1515508997-6154-2-git-send-email-dwmw@amazon.co.uk
----
- arch/x86/Kconfig | 13 ++++
- arch/x86/Makefile | 10 ++++
- arch/x86/include/asm/asm-prototypes.h | 25 ++++++++
- arch/x86/include/asm/cpufeatures.h | 2 +
- arch/x86/include/asm/nospec-branch.h | 109 ++++++++++++++++++++++++++++++++++
- arch/x86/kernel/cpu/common.c | 4 ++
- arch/x86/lib/Makefile | 1 +
- arch/x86/lib/retpoline.S | 48 +++++++++++++++
- 8 files changed, 212 insertions(+)
- create mode 100644 arch/x86/include/asm/nospec-branch.h
- create mode 100644 arch/x86/lib/retpoline.S
-
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index e23d21ac745a..d1819161cc6c 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -429,6 +429,19 @@ config GOLDFISH
- def_bool y
- depends on X86_GOLDFISH
-
-+config RETPOLINE
-+ bool "Avoid speculative indirect branches in kernel"
-+ default y
-+ help
-+ Compile kernel with the retpoline compiler options to guard against
-+ kernel-to-user data leaks by avoiding speculative indirect
-+ branches. Requires a compiler with -mindirect-branch=thunk-extern
-+ support for full protection. The kernel may run slower.
-+
-+ Without compiler support, at least indirect branches in assembler
-+ code are eliminated. Since this includes the syscall entry path,
-+ it is not entirely pointless.
-+
- config INTEL_RDT
- bool "Intel Resource Director Technology support"
- default n
-diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index a20eacd9c7e9..974c61864978 100644
---- a/arch/x86/Makefile
-+++ b/arch/x86/Makefile
-@@ -235,6 +235,16 @@ KBUILD_CFLAGS += -Wno-sign-compare
- #
- KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-
-+# Avoid indirect branches in kernel to deal with Spectre
-+ifdef CONFIG_RETPOLINE
-+ RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
-+ ifneq ($(RETPOLINE_CFLAGS),)
-+ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
-+ else
-+ $(warning CONFIG_RETPOLINE=y, but not supported by the compiler. Toolchain update recommended.)
-+ endif
-+endif
-+
- archscripts: scripts_basic
- $(Q)$(MAKE) $(build)=arch/x86/tools relocs
-
-diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
-index ff700d81e91e..0927cdc4f946 100644
---- a/arch/x86/include/asm/asm-prototypes.h
-+++ b/arch/x86/include/asm/asm-prototypes.h
-@@ -11,7 +11,32 @@
- #include <asm/pgtable.h>
- #include <asm/special_insns.h>
- #include <asm/preempt.h>
-+#include <asm/asm.h>
-
- #ifndef CONFIG_X86_CMPXCHG64
- extern void cmpxchg8b_emu(void);
- #endif
-+
-+#ifdef CONFIG_RETPOLINE
-+#ifdef CONFIG_X86_32
-+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
-+#else
-+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
-+INDIRECT_THUNK(8)
-+INDIRECT_THUNK(9)
-+INDIRECT_THUNK(10)
-+INDIRECT_THUNK(11)
-+INDIRECT_THUNK(12)
-+INDIRECT_THUNK(13)
-+INDIRECT_THUNK(14)
-+INDIRECT_THUNK(15)
-+#endif
-+INDIRECT_THUNK(ax)
-+INDIRECT_THUNK(bx)
-+INDIRECT_THUNK(cx)
-+INDIRECT_THUNK(dx)
-+INDIRECT_THUNK(si)
-+INDIRECT_THUNK(di)
-+INDIRECT_THUNK(bp)
-+INDIRECT_THUNK(sp)
-+#endif /* CONFIG_RETPOLINE */
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
-index 1641c2f96363..f275447862f4 100644
---- a/arch/x86/include/asm/cpufeatures.h
-+++ b/arch/x86/include/asm/cpufeatures.h
-@@ -203,6 +203,8 @@
- #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
- #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
- #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
-+#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
-+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
- #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
- #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
- #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
-new file mode 100644
-index 000000000000..7f58713b27c4
---- /dev/null
-+++ b/arch/x86/include/asm/nospec-branch.h
-@@ -0,0 +1,109 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+
-+#ifndef __NOSPEC_BRANCH_H__
-+#define __NOSPEC_BRANCH_H__
-+
-+#include <asm/alternative.h>
-+#include <asm/alternative-asm.h>
-+#include <asm/cpufeatures.h>
-+
-+#ifdef __ASSEMBLY__
-+
-+/*
-+ * These are the bare retpoline primitives for indirect jmp and call.
-+ * Do not use these directly; they only exist to make the ALTERNATIVE
-+ * invocation below less ugly.
-+ */
-+.macro RETPOLINE_JMP reg:req
-+ call .Ldo_rop_\@
-+.Lspec_trap_\@:
-+ pause
-+ jmp .Lspec_trap_\@
-+.Ldo_rop_\@:
-+ mov \reg, (%_ASM_SP)
-+ ret
-+.endm
-+
-+/*
-+ * This is a wrapper around RETPOLINE_JMP so the called function in reg
-+ * returns to the instruction after the macro.
-+ */
-+.macro RETPOLINE_CALL reg:req
-+ jmp .Ldo_call_\@
-+.Ldo_retpoline_jmp_\@:
-+ RETPOLINE_JMP \reg
-+.Ldo_call_\@:
-+ call .Ldo_retpoline_jmp_\@
-+.endm
-+
-+/*
-+ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
-+ * indirect jmp/call which may be susceptible to the Spectre variant 2
-+ * attack.
-+ */
-+.macro JMP_NOSPEC reg:req
-+#ifdef CONFIG_RETPOLINE
-+ ALTERNATIVE_2 __stringify(jmp *\reg), \
-+ __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
-+ __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
-+#else
-+ jmp *\reg
-+#endif
-+.endm
-+
-+.macro CALL_NOSPEC reg:req
-+#ifdef CONFIG_RETPOLINE
-+ ALTERNATIVE_2 __stringify(call *\reg), \
-+ __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
-+ __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
-+#else
-+ call *\reg
-+#endif
-+.endm
-+
-+#else /* __ASSEMBLY__ */
-+
-+#if defined(CONFIG_X86_64) && defined(RETPOLINE)
-+/*
-+ * Since the inline asm uses the %V modifier which is only in newer GCC,
-+ * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
-+ */
-+# define CALL_NOSPEC ALTERNATIVE( \
-+ "call *%[thunk_target]\n", \
-+ "call __x86_indirect_thunk_%V[thunk_target]\n", \
-+ X86_FEATURE_RETPOLINE)
-+# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
-+#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
-+/*
-+ * For i386 we use the original ret-equivalent retpoline, because
-+ * otherwise we'll run out of registers. We don't care about CET
-+ * here, anyway.
-+ */
-+# define CALL_NOSPEC ALTERNATIVE( \
-+ "call *%[thunk_target]\n", \
-+ "" \
-+ " jmp do_call%=;\n" \
-+ " .align 16\n" \
-+ "do_retpoline%=:\n" \
-+ " call do_rop%=;\n" \
-+ "spec_trap%=:\n" \
-+ " pause;\n" \
-+ " jmp spec_trap%=;\n" \
-+ " .align 16\n" \
-+ "do_rop%=:\n" \
-+ " addl $4, %%esp;\n" \
-+ " pushl %[thunk_target];\n" \
-+ " ret;\n" \
-+ " .align 16\n" \
-+ "do_call%=:\n" \
-+ " call do_retpoline%=;\n", \
-+ X86_FEATURE_RETPOLINE)
-+
-+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
-+#else /* No retpoline */
-+# define CALL_NOSPEC "call *%[thunk_target]\n"
-+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
-+#endif
-+
-+#endif /* __ASSEMBLY__ */
-+#endif /* __NOSPEC_BRANCH_H__ */
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 372ba3fb400f..7a671d1ae3cb 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -905,6 +905,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-
-+#ifdef CONFIG_RETPOLINE
-+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
-+#endif
-+
- fpu__init_system(c);
-
- #ifdef CONFIG_X86_32
-diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
-index 457f681ef379..d435c89875c1 100644
---- a/arch/x86/lib/Makefile
-+++ b/arch/x86/lib/Makefile
-@@ -26,6 +26,7 @@ lib-y += memcpy_$(BITS).o
- lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
- lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
- lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
-+lib-$(CONFIG_RETPOLINE) += retpoline.o
-
- obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
-
-diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
-new file mode 100644
-index 000000000000..cb45c6cb465f
---- /dev/null
-+++ b/arch/x86/lib/retpoline.S
-@@ -0,0 +1,48 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+
-+#include <linux/stringify.h>
-+#include <linux/linkage.h>
-+#include <asm/dwarf2.h>
-+#include <asm/cpufeatures.h>
-+#include <asm/alternative-asm.h>
-+#include <asm/export.h>
-+#include <asm/nospec-branch.h>
-+
-+.macro THUNK reg
-+ .section .text.__x86.indirect_thunk.\reg
-+
-+ENTRY(__x86_indirect_thunk_\reg)
-+ CFI_STARTPROC
-+ JMP_NOSPEC %\reg
-+ CFI_ENDPROC
-+ENDPROC(__x86_indirect_thunk_\reg)
-+.endm
-+
-+/*
-+ * Despite being an assembler file we can't just use .irp here
-+ * because __KSYM_DEPS__ only uses the C preprocessor and would
-+ * only see one instance of "__x86_indirect_thunk_\reg" rather
-+ * than one per register with the correct names. So we do it
-+ * the simple and nasty way...
-+ */
-+#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
-+#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
-+
-+GENERATE_THUNK(_ASM_AX)
-+GENERATE_THUNK(_ASM_BX)
-+GENERATE_THUNK(_ASM_CX)
-+GENERATE_THUNK(_ASM_DX)
-+GENERATE_THUNK(_ASM_SI)
-+GENERATE_THUNK(_ASM_DI)
-+GENERATE_THUNK(_ASM_BP)
-+GENERATE_THUNK(_ASM_SP)
-+#ifdef CONFIG_64BIT
-+GENERATE_THUNK(r8)
-+GENERATE_THUNK(r9)
-+GENERATE_THUNK(r10)
-+GENERATE_THUNK(r11)
-+GENERATE_THUNK(r12)
-+GENERATE_THUNK(r13)
-+GENERATE_THUNK(r14)
-+GENERATE_THUNK(r15)
-+#endif
---
-2.14.3
-
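A hedged sketch of how a C call site adopts this header; it mirrors the pattern the hyperv and Xen conversions below apply, with a hypothetical function pointer. Kernel context is assumed (it will not build standalone), and the clobber list is deliberately the full caller-saved set, since an arbitrary target may clobber them:

	/* Hypothetical x86_64 kernel-context call site, not from the patch. */
	#include <asm/asm.h>
	#include <asm/nospec-branch.h>

	static unsigned long demo_call(unsigned long (*func)(void))
	{
		unsigned long ret;

		asm volatile(CALL_NOSPEC
			     : "=a" (ret), ASM_CALL_CONSTRAINT
			     : THUNK_TARGET(func)
			     : "rcx", "rdx", "rsi", "rdi",
			       "r8", "r9", "r10", "r11", "cc", "memory");
		return ret;
	}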
-From 59b6e22f92f9a86dbd0798db72adc97bdb831f86 Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Tue, 9 Jan 2018 14:43:08 +0000
-Subject: [PATCH 02/10] x86/retpoline: Temporarily disable objtool when
- CONFIG_RETPOLINE=y
-
-objtool's assembler currently cannot deal with the code generated by the
-retpoline compiler and throws hundreds of warnings, mostly because it sees
-calls that don't have a symbolic target.
-
-Exclude all the options that rely on objtool when RETPOLINE is active.
-
-This mainly means that the kernel has to fallback to use the frame pointer
-unwinder and livepatch is not supported.
-
-Josh is looking into resolving the issue.
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Arjan van de Ven <arjan@linux.intel.com>
-Acked-by: Ingo Molnar <mingo@kernel.org>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/1515508997-6154-3-git-send-email-dwmw@amazon.co.uk
----
- arch/x86/Kconfig | 4 ++--
- arch/x86/Kconfig.debug | 6 +++---
- 2 files changed, 5 insertions(+), 5 deletions(-)
-
-diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index d1819161cc6c..abeac4b80b74 100644
---- a/arch/x86/Kconfig
-+++ b/arch/x86/Kconfig
-@@ -172,8 +172,8 @@ config X86
- select HAVE_PERF_USER_STACK_DUMP
- select HAVE_RCU_TABLE_FREE
- select HAVE_REGS_AND_STACK_ACCESS_API
-- select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
-- select HAVE_STACK_VALIDATION if X86_64
-+ select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION && !RETPOLINE
-+ select HAVE_STACK_VALIDATION if X86_64 && !RETPOLINE
- select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_UNSTABLE_SCHED_CLOCK
- select HAVE_USER_RETURN_NOTIFIER
-diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 6293a8768a91..9f3928d744bc 100644
---- a/arch/x86/Kconfig.debug
-+++ b/arch/x86/Kconfig.debug
-@@ -359,8 +359,8 @@ config PUNIT_ATOM_DEBUG
-
- choice
- prompt "Choose kernel unwinder"
-- default UNWINDER_ORC if X86_64
-- default UNWINDER_FRAME_POINTER if X86_32
-+ default UNWINDER_ORC if X86_64 && !RETPOLINE
-+ default UNWINDER_FRAME_POINTER if X86_32 || RETPOLINE
- ---help---
- This determines which method will be used for unwinding kernel stack
- traces for panics, oopses, bugs, warnings, perf, /proc/<pid>/stack,
-@@ -368,7 +368,7 @@ choice
-
- config UNWINDER_ORC
- bool "ORC unwinder"
-- depends on X86_64
-+ depends on X86_64 && !RETPOLINE
- select STACK_VALIDATION
- ---help---
- This option enables the ORC (Oops Rewind Capability) unwinder for
---
-2.14.3
-
-From 86d057614112971f7d5bbac45f67869adca79852 Mon Sep 17 00:00:00 2001
-From: David Woodhouse <dwmw@amazon.co.uk>
-Date: Tue, 9 Jan 2018 14:43:09 +0000
-Subject: [PATCH 03/10] x86/spectre: Add boot time option to select Spectre v2
- mitigation
-
-Add a spectre_v2= option to select the mitigation used for the indirect
-branch speculation vulnerability.
-
-Currently, the only option available is retpoline, in its various forms.
-This will be expanded to cover the new IBRS/IBPB microcode features.
-
-The RETPOLINE_AMD feature relies on a serializing LFENCE for speculation
-control. For AMD hardware, only set RETPOLINE_AMD if LFENCE is a
-serializing instruction, which is indicated by the LFENCE_RDTSC feature.
-
-[ tglx: Folded back the LFENCE/AMD fixes and reworked it so IBRS
- integration becomes simple ]
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Cc: Tom Lendacky <thomas.lendacky@amd.com>
-Link: https://lkml.kernel.org/r/1515508997-6154-4-git-send-email-dwmw@amazon.co.uk
----
- Documentation/admin-guide/kernel-parameters.txt | 28 +++++
- arch/x86/include/asm/nospec-branch.h | 10 ++
- arch/x86/kernel/cpu/bugs.c | 158 +++++++++++++++++++++++-
- arch/x86/kernel/cpu/common.c | 4 -
- 4 files changed, 195 insertions(+), 5 deletions(-)
-
-diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 905991745d26..8122b5f98ea1 100644
---- a/Documentation/admin-guide/kernel-parameters.txt
-+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -2599,6 +2599,11 @@
- nosmt [KNL,S390] Disable symmetric multithreading (SMT).
- Equivalent to smt=1.
-
-+ nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
-+ (indirect branch prediction) vulnerability. System may
-+ allow data leaks with this option, which is equivalent
-+ to spectre_v2=off.
-+
- noxsave [BUGS=X86] Disables x86 extended register state save
- and restore using xsave. The kernel will fallback to
- enabling legacy floating-point and sse state.
-@@ -3908,6 +3913,29 @@
- sonypi.*= [HW] Sony Programmable I/O Control Device driver
- See Documentation/laptops/sonypi.txt
-
-+ spectre_v2= [X86] Control mitigation of Spectre variant 2
-+ (indirect branch speculation) vulnerability.
-+
-+ on - unconditionally enable
-+ off - unconditionally disable
-+ auto - kernel detects whether your CPU model is
-+ vulnerable
-+
-+ Selecting 'on' will, and 'auto' may, choose a
-+ mitigation method at run time according to the
-+ CPU, the available microcode, the setting of the
-+ CONFIG_RETPOLINE configuration option, and the
-+ compiler with which the kernel was built.
-+
-+ Specific mitigations can also be selected manually:
-+
-+ retpoline - replace indirect branches
-+ retpoline,generic - google's original retpoline
-+ retpoline,amd - AMD-specific minimal thunk
-+
-+ Not specifying this option is equivalent to
-+ spectre_v2=auto.
-+
- spia_io_base= [HW,MTD]
- spia_fio_base=
- spia_pedr=
-diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
-index 7f58713b27c4..7d70ea977fbe 100644
---- a/arch/x86/include/asm/nospec-branch.h
-+++ b/arch/x86/include/asm/nospec-branch.h
-@@ -105,5 +105,15 @@
- # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
- #endif
-
-+/* The Spectre V2 mitigation variants */
-+enum spectre_v2_mitigation {
-+ SPECTRE_V2_NONE,
-+ SPECTRE_V2_RETPOLINE_MINIMAL,
-+ SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
-+ SPECTRE_V2_RETPOLINE_GENERIC,
-+ SPECTRE_V2_RETPOLINE_AMD,
-+ SPECTRE_V2_IBRS,
-+};
-+
- #endif /* __ASSEMBLY__ */
- #endif /* __NOSPEC_BRANCH_H__ */
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index 76ad6cb44b40..e4dc26185aa7 100644
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -11,6 +11,9 @@
- #include <linux/init.h>
- #include <linux/utsname.h>
- #include <linux/cpu.h>
-+
-+#include <asm/nospec-branch.h>
-+#include <asm/cmdline.h>
- #include <asm/bugs.h>
- #include <asm/processor.h>
- #include <asm/processor-flags.h>
-@@ -21,6 +24,8 @@
- #include <asm/pgtable.h>
- #include <asm/set_memory.h>
-
-+static void __init spectre_v2_select_mitigation(void);
-+
- void __init check_bugs(void)
- {
- identify_boot_cpu();
-@@ -30,6 +35,9 @@ void __init check_bugs(void)
- print_cpu_info(&boot_cpu_data);
- }
-
-+ /* Select the proper spectre mitigation before patching alternatives */
-+ spectre_v2_select_mitigation();
-+
- #ifdef CONFIG_X86_32
- /*
- * Check whether we are able to run this kernel safely on SMP.
-@@ -62,6 +70,153 @@ void __init check_bugs(void)
- #endif
- }
-
-+/* The kernel command line selection */
-+enum spectre_v2_mitigation_cmd {
-+ SPECTRE_V2_CMD_NONE,
-+ SPECTRE_V2_CMD_AUTO,
-+ SPECTRE_V2_CMD_FORCE,
-+ SPECTRE_V2_CMD_RETPOLINE,
-+ SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-+ SPECTRE_V2_CMD_RETPOLINE_AMD,
-+};
-+
-+static const char *spectre_v2_strings[] = {
-+ [SPECTRE_V2_NONE] = "Vulnerable",
-+ [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
-+ [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
-+ [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
-+ [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
-+};
-+
-+#undef pr_fmt
-+#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt
-+
-+static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
-+
-+static void __init spec2_print_if_insecure(const char *reason)
-+{
-+ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-+ pr_info("%s\n", reason);
-+}
-+
-+static void __init spec2_print_if_secure(const char *reason)
-+{
-+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-+ pr_info("%s\n", reason);
-+}
-+
-+static inline bool retp_compiler(void)
-+{
-+ return __is_defined(RETPOLINE);
-+}
-+
-+static inline bool match_option(const char *arg, int arglen, const char *opt)
-+{
-+ int len = strlen(opt);
-+
-+ return len == arglen && !strncmp(arg, opt, len);
-+}
-+
-+static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
-+{
-+ char arg[20];
-+ int ret;
-+
-+ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
-+ sizeof(arg));
-+ if (ret > 0) {
-+ if (match_option(arg, ret, "off")) {
-+ goto disable;
-+ } else if (match_option(arg, ret, "on")) {
-+ spec2_print_if_secure("force enabled on command line.");
-+ return SPECTRE_V2_CMD_FORCE;
-+ } else if (match_option(arg, ret, "retpoline")) {
-+ spec2_print_if_insecure("retpoline selected on command line.");
-+ return SPECTRE_V2_CMD_RETPOLINE;
-+ } else if (match_option(arg, ret, "retpoline,amd")) {
-+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
-+ pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
-+ return SPECTRE_V2_CMD_AUTO;
-+ }
-+ spec2_print_if_insecure("AMD retpoline selected on command line.");
-+ return SPECTRE_V2_CMD_RETPOLINE_AMD;
-+ } else if (match_option(arg, ret, "retpoline,generic")) {
-+ spec2_print_if_insecure("generic retpoline selected on command line.");
-+ return SPECTRE_V2_CMD_RETPOLINE_GENERIC;
-+ } else if (match_option(arg, ret, "auto")) {
-+ return SPECTRE_V2_CMD_AUTO;
-+ }
-+ }
-+
-+ if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
-+ return SPECTRE_V2_CMD_AUTO;
-+disable:
-+ spec2_print_if_insecure("disabled on command line.");
-+ return SPECTRE_V2_CMD_NONE;
-+}
-+
-+static void __init spectre_v2_select_mitigation(void)
-+{
-+ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
-+ enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
-+
-+ /*
-+ * If the CPU is not affected and the command line mode is NONE or AUTO
-+ * then nothing to do.
-+ */
-+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
-+ (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
-+ return;
-+
-+ switch (cmd) {
-+ case SPECTRE_V2_CMD_NONE:
-+ return;
-+
-+ case SPECTRE_V2_CMD_FORCE:
-+		/* FALLTHRU */
-+ case SPECTRE_V2_CMD_AUTO:
-+ goto retpoline_auto;
-+
-+ case SPECTRE_V2_CMD_RETPOLINE_AMD:
-+ if (IS_ENABLED(CONFIG_RETPOLINE))
-+ goto retpoline_amd;
-+ break;
-+ case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
-+ if (IS_ENABLED(CONFIG_RETPOLINE))
-+ goto retpoline_generic;
-+ break;
-+ case SPECTRE_V2_CMD_RETPOLINE:
-+ if (IS_ENABLED(CONFIG_RETPOLINE))
-+ goto retpoline_auto;
-+ break;
-+ }
-+	pr_err("kernel not compiled with retpoline; no mitigation available!\n");
-+ return;
-+
-+retpoline_auto:
-+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-+ retpoline_amd:
-+ if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
-+ pr_err("LFENCE not serializing. Switching to generic retpoline\n");
-+ goto retpoline_generic;
-+ }
-+ mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
-+ SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
-+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
-+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
-+ } else {
-+ retpoline_generic:
-+ mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
-+ SPECTRE_V2_RETPOLINE_MINIMAL;
-+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
-+ }
-+
-+ spectre_v2_enabled = mode;
-+ pr_info("%s\n", spectre_v2_strings[mode]);
-+}
-+
-+#undef pr_fmt
-+
- #ifdef CONFIG_SYSFS
- ssize_t cpu_show_meltdown(struct device *dev,
- struct device_attribute *attr, char *buf)
-@@ -86,6 +241,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
- {
- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
- return sprintf(buf, "Not affected\n");
-- return sprintf(buf, "Vulnerable\n");
-+
-+ return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
- }
- #endif
-diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index 7a671d1ae3cb..372ba3fb400f 100644
---- a/arch/x86/kernel/cpu/common.c
-+++ b/arch/x86/kernel/cpu/common.c
-@@ -905,10 +905,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-
--#ifdef CONFIG_RETPOLINE
-- setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
--#endif
--
- fpu__init_system(c);
-
- #ifdef CONFIG_X86_32
---
-2.14.3
-
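One subtlety in the parser above: cmdline_find_option() hands back the argument's length, and match_option() insists on an exact length match, so "retpoline" cannot accidentally match the longer "retpoline,amd". A standalone copy demonstrating that:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool match_option(const char *arg, int arglen, const char *opt)
	{
		int len = strlen(opt);

		return len == arglen && !strncmp(arg, opt, len);
	}

	int main(void)
	{
		const char *arg = "retpoline,amd";
		int arglen = (int)strlen(arg);

		printf("%d %d\n", match_option(arg, arglen, "retpoline"),
		       match_option(arg, arglen, "retpoline,amd")); /* 0 1 */
		return 0;
	}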
-From b3a96862283e68914d1f74f160ab980dacf811ee Mon Sep 17 00:00:00 2001
-From: David Woodhouse <dwmw@amazon.co.uk>
-Date: Tue, 9 Jan 2018 14:43:10 +0000
-Subject: [PATCH 04/10] x86/retpoline/crypto: Convert crypto assembler indirect
- jumps
-
-Convert all indirect jumps in crypto assembler code to use non-speculative
-sequences when CONFIG_RETPOLINE is enabled.
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Arjan van de Ven <arjan@linux.intel.com>
-Acked-by: Ingo Molnar <mingo@kernel.org>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/1515508997-6154-5-git-send-email-dwmw@amazon.co.uk
----
- arch/x86/crypto/aesni-intel_asm.S | 5 +++--
- arch/x86/crypto/camellia-aesni-avx-asm_64.S | 3 ++-
- arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 3 ++-
- arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 3 ++-
- 4 files changed, 9 insertions(+), 5 deletions(-)
-
-diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
-index 16627fec80b2..3d09e3aca18d 100644
---- a/arch/x86/crypto/aesni-intel_asm.S
-+++ b/arch/x86/crypto/aesni-intel_asm.S
-@@ -32,6 +32,7 @@
- #include <linux/linkage.h>
- #include <asm/inst.h>
- #include <asm/frame.h>
-+#include <asm/nospec-branch.h>
-
- /*
- * The following macros are used to move an (un)aligned 16 byte value to/from
-@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8)
- pxor INC, STATE4
- movdqu IV, 0x30(OUTP)
-
-- call *%r11
-+ CALL_NOSPEC %r11
-
- movdqu 0x00(OUTP), INC
- pxor INC, STATE1
-@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8)
- _aesni_gf128mul_x_ble()
- movups IV, (IVP)
-
-- call *%r11
-+ CALL_NOSPEC %r11
-
- movdqu 0x40(OUTP), INC
- pxor INC, STATE1
-diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
-index f7c495e2863c..a14af6eb09cb 100644
---- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
-+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
-@@ -17,6 +17,7 @@
-
- #include <linux/linkage.h>
- #include <asm/frame.h>
-+#include <asm/nospec-branch.h>
-
- #define CAMELLIA_TABLE_BYTE_LEN 272
-
-@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way:
- vpxor 14 * 16(%rax), %xmm15, %xmm14;
- vpxor 15 * 16(%rax), %xmm15, %xmm15;
-
-- call *%r9;
-+ CALL_NOSPEC %r9;
-
- addq $(16 * 16), %rsp;
-
-diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
-index eee5b3982cfd..b66bbfa62f50 100644
---- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
-+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
-@@ -12,6 +12,7 @@
-
- #include <linux/linkage.h>
- #include <asm/frame.h>
-+#include <asm/nospec-branch.h>
-
- #define CAMELLIA_TABLE_BYTE_LEN 272
-
-@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way:
- vpxor 14 * 32(%rax), %ymm15, %ymm14;
- vpxor 15 * 32(%rax), %ymm15, %ymm15;
-
-- call *%r9;
-+ CALL_NOSPEC %r9;
-
- addq $(16 * 32), %rsp;
-
-diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
-index 7a7de27c6f41..d9b734d0c8cc 100644
---- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
-+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
-@@ -45,6 +45,7 @@
-
- #include <asm/inst.h>
- #include <linux/linkage.h>
-+#include <asm/nospec-branch.h>
-
- ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
-
-@@ -172,7 +173,7 @@ continue_block:
- movzxw (bufp, %rax, 2), len
- lea crc_array(%rip), bufp
- lea (bufp, len, 1), bufp
-- jmp *bufp
-+ JMP_NOSPEC bufp
-
- ################################################################
- ## 2a) PROCESS FULL BLOCKS:
---
-2.14.3
-
-From 2558106c7a47e16968a10fa66eea78a096fabfe6 Mon Sep 17 00:00:00 2001
-From: David Woodhouse <dwmw@amazon.co.uk>
-Date: Tue, 9 Jan 2018 14:43:11 +0000
-Subject: [PATCH 05/10] x86/retpoline/entry: Convert entry assembler indirect
- jumps
-
-Convert indirect jumps in core 32/64bit entry assembler code to use
-non-speculative sequences when CONFIG_RETPOLINE is enabled.
-
-Don't use CALL_NOSPEC in entry_SYSCALL_64_fastpath because the return
-address after the 'call' instruction must be *precisely* at the
-.Lentry_SYSCALL_64_after_fastpath label for stub_ptregs_64 to work,
-and the use of alternatives will mess that up unless we play horrid
-games to prepend with NOPs and make the variants the same length. It's
-not worth it; in the case where we ALTERNATIVE out the retpoline, the
-first instruction at __x86.indirect_thunk.rax is going to be a bare
-jmp *%rax anyway.
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Ingo Molnar <mingo@kernel.org>
-Acked-by: Arjan van de Ven <arjan@linux.intel.com>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/1515508997-6154-6-git-send-email-dwmw@amazon.co.uk
----
- arch/x86/entry/entry_32.S | 5 +++--
- arch/x86/entry/entry_64.S | 12 +++++++++---
- 2 files changed, 12 insertions(+), 5 deletions(-)
-
-diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
-index ace8f321a5a1..a1f28a54f23a 100644
---- a/arch/x86/entry/entry_32.S
-+++ b/arch/x86/entry/entry_32.S
-@@ -44,6 +44,7 @@
- #include <asm/asm.h>
- #include <asm/smap.h>
- #include <asm/frame.h>
-+#include <asm/nospec-branch.h>
-
- .section .entry.text, "ax"
-
-@@ -290,7 +291,7 @@ ENTRY(ret_from_fork)
-
- /* kernel thread */
- 1: movl %edi, %eax
-- call *%ebx
-+ CALL_NOSPEC %ebx
- /*
- * A kernel thread is allowed to return here after successfully
- * calling do_execve(). Exit to userspace to complete the execve()
-@@ -919,7 +920,7 @@ common_exception:
- movl %ecx, %es
- TRACE_IRQS_OFF
- movl %esp, %eax # pt_regs pointer
-- call *%edi
-+ CALL_NOSPEC %edi
- jmp ret_from_exception
- END(common_exception)
-
-diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
-index ed31d00dc5ee..59874bc1aed2 100644
---- a/arch/x86/entry/entry_64.S
-+++ b/arch/x86/entry/entry_64.S
-@@ -37,6 +37,7 @@
- #include <asm/pgtable_types.h>
- #include <asm/export.h>
- #include <asm/frame.h>
-+#include <asm/nospec-branch.h>
- #include <linux/err.h>
-
- #include "calling.h"
-@@ -187,7 +188,7 @@ ENTRY(entry_SYSCALL_64_trampoline)
- */
- pushq %rdi
- movq $entry_SYSCALL_64_stage2, %rdi
-- jmp *%rdi
-+ JMP_NOSPEC %rdi
- END(entry_SYSCALL_64_trampoline)
-
- .popsection
-@@ -266,7 +267,12 @@ entry_SYSCALL_64_fastpath:
- * It might end up jumping to the slow path. If it jumps, RAX
- * and all argument registers are clobbered.
- */
-+#ifdef CONFIG_RETPOLINE
-+ movq sys_call_table(, %rax, 8), %rax
-+ call __x86_indirect_thunk_rax
-+#else
- call *sys_call_table(, %rax, 8)
-+#endif
- .Lentry_SYSCALL_64_after_fastpath_call:
-
- movq %rax, RAX(%rsp)
-@@ -438,7 +444,7 @@ ENTRY(stub_ptregs_64)
- jmp entry_SYSCALL64_slow_path
-
- 1:
-- jmp *%rax /* Called from C */
-+ JMP_NOSPEC %rax /* Called from C */
- END(stub_ptregs_64)
-
- .macro ptregs_stub func
-@@ -517,7 +523,7 @@ ENTRY(ret_from_fork)
- 1:
- /* kernel thread */
- movq %r12, %rdi
-- call *%rbx
-+ CALL_NOSPEC %rbx
- /*
- * A kernel thread is allowed to return here after successfully
- * calling do_execve(). Exit to userspace to complete the execve()
---
-2.14.3
-
-From 42f7c812022441ffba2d5ccca3acf6380201f19e Mon Sep 17 00:00:00 2001
-From: David Woodhouse <dwmw@amazon.co.uk>
-Date: Tue, 9 Jan 2018 14:43:12 +0000
-Subject: [PATCH 06/10] x86/retpoline/ftrace: Convert ftrace assembler indirect
- jumps
-
-Convert all indirect jumps in ftrace assembler code to use non-speculative
-sequences when CONFIG_RETPOLINE is enabled.
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Arjan van de Ven <arjan@linux.intel.com>
-Acked-by: Ingo Molnar <mingo@kernel.org>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/1515508997-6154-7-git-send-email-dwmw@amazon.co.uk
----
- arch/x86/kernel/ftrace_32.S | 6 ++++--
- arch/x86/kernel/ftrace_64.S | 8 ++++----
- 2 files changed, 8 insertions(+), 6 deletions(-)
-
-diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
-index b6c6468e10bc..4c8440de3355 100644
---- a/arch/x86/kernel/ftrace_32.S
-+++ b/arch/x86/kernel/ftrace_32.S
-@@ -8,6 +8,7 @@
- #include <asm/segment.h>
- #include <asm/export.h>
- #include <asm/ftrace.h>
-+#include <asm/nospec-branch.h>
-
- #ifdef CC_USING_FENTRY
- # define function_hook __fentry__
-@@ -197,7 +198,8 @@ ftrace_stub:
- movl 0x4(%ebp), %edx
- subl $MCOUNT_INSN_SIZE, %eax
-
-- call *ftrace_trace_function
-+ movl ftrace_trace_function, %ecx
-+ CALL_NOSPEC %ecx
-
- popl %edx
- popl %ecx
-@@ -241,5 +243,5 @@ return_to_handler:
- movl %eax, %ecx
- popl %edx
- popl %eax
-- jmp *%ecx
-+ JMP_NOSPEC %ecx
- #endif
-diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
-index c832291d948a..7cb8ba08beb9 100644
---- a/arch/x86/kernel/ftrace_64.S
-+++ b/arch/x86/kernel/ftrace_64.S
-@@ -7,7 +7,7 @@
- #include <asm/ptrace.h>
- #include <asm/ftrace.h>
- #include <asm/export.h>
--
-+#include <asm/nospec-branch.h>
-
- .code64
- .section .entry.text, "ax"
-@@ -286,8 +286,8 @@ trace:
- * ip and parent ip are used and the list function is called when
- * function tracing is enabled.
- */
-- call *ftrace_trace_function
--
-+ movq ftrace_trace_function, %r8
-+ CALL_NOSPEC %r8
- restore_mcount_regs
-
- jmp fgraph_trace
-@@ -329,5 +329,5 @@ GLOBAL(return_to_handler)
- movq 8(%rsp), %rdx
- movq (%rsp), %rax
- addq $24, %rsp
-- jmp *%rdi
-+ JMP_NOSPEC %rdi
- #endif
---
-2.14.3
-
-From f14fd95d2f3e611619756ea3c008aee3b4bd4978 Mon Sep 17 00:00:00 2001
-From: David Woodhouse <dwmw@amazon.co.uk>
-Date: Tue, 9 Jan 2018 14:43:13 +0000
-Subject: [PATCH 07/10] x86/retpoline/hyperv: Convert assembler indirect jumps
-
-Convert all indirect jumps in hyperv inline asm code to use non-speculative
-sequences when CONFIG_RETPOLINE is enabled.
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Arjan van de Ven <arjan@linux.intel.com>
-Acked-by: Ingo Molnar <mingo@kernel.org>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/1515508997-6154-8-git-send-email-dwmw@amazon.co.uk
----
- arch/x86/include/asm/mshyperv.h | 18 ++++++++++--------
- 1 file changed, 10 insertions(+), 8 deletions(-)
-
-diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
-index 581bb54dd464..5119e4b555cc 100644
---- a/arch/x86/include/asm/mshyperv.h
-+++ b/arch/x86/include/asm/mshyperv.h
-@@ -7,6 +7,7 @@
- #include <linux/nmi.h>
- #include <asm/io.h>
- #include <asm/hyperv.h>
-+#include <asm/nospec-branch.h>
-
- /*
- * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
-@@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
- return U64_MAX;
-
- __asm__ __volatile__("mov %4, %%r8\n"
-- "call *%5"
-+ CALL_NOSPEC
- : "=a" (hv_status), ASM_CALL_CONSTRAINT,
- "+c" (control), "+d" (input_address)
-- : "r" (output_address), "m" (hv_hypercall_pg)
-+ : "r" (output_address),
-+ THUNK_TARGET(hv_hypercall_pg)
- : "cc", "memory", "r8", "r9", "r10", "r11");
- #else
- u32 input_address_hi = upper_32_bits(input_address);
-@@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
- if (!hv_hypercall_pg)
- return U64_MAX;
-
-- __asm__ __volatile__("call *%7"
-+ __asm__ __volatile__(CALL_NOSPEC
- : "=A" (hv_status),
- "+c" (input_address_lo), ASM_CALL_CONSTRAINT
- : "A" (control),
- "b" (input_address_hi),
- "D"(output_address_hi), "S"(output_address_lo),
-- "m" (hv_hypercall_pg)
-+ THUNK_TARGET(hv_hypercall_pg)
- : "cc", "memory");
- #endif /* !x86_64 */
- return hv_status;
-@@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
-
- #ifdef CONFIG_X86_64
- {
-- __asm__ __volatile__("call *%4"
-+ __asm__ __volatile__(CALL_NOSPEC
- : "=a" (hv_status), ASM_CALL_CONSTRAINT,
- "+c" (control), "+d" (input1)
-- : "m" (hv_hypercall_pg)
-+ : THUNK_TARGET(hv_hypercall_pg)
- : "cc", "r8", "r9", "r10", "r11");
- }
- #else
-@@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
- u32 input1_hi = upper_32_bits(input1);
- u32 input1_lo = lower_32_bits(input1);
-
-- __asm__ __volatile__ ("call *%5"
-+ __asm__ __volatile__ (CALL_NOSPEC
- : "=A"(hv_status),
- "+c"(input1_lo),
- ASM_CALL_CONSTRAINT
- : "A" (control),
- "b" (input1_hi),
-- "m" (hv_hypercall_pg)
-+ THUNK_TARGET(hv_hypercall_pg)
- : "cc", "edi", "esi");
- }
- #endif
---
-2.14.3
-
-From b569cb1e72bda00e7e6245519fe7d0d0ab13898e Mon Sep 17 00:00:00 2001
-From: David Woodhouse <dwmw@amazon.co.uk>
-Date: Tue, 9 Jan 2018 14:43:14 +0000
-Subject: [PATCH 08/10] x86/retpoline/xen: Convert Xen hypercall indirect jumps
-
-Convert indirect call in Xen hypercall to use non-speculative sequence,
-when CONFIG_RETPOLINE is enabled.
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Juergen Gross <jgross@suse.com>
-Acked-by: Arjan van de Ven <arjan@linux.intel.com>
-Acked-by: Ingo Molnar <mingo@kernel.org>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/1515508997-6154-9-git-send-email-dwmw@amazon.co.uk
----
- arch/x86/include/asm/xen/hypercall.h | 5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
-diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
-index 7cb282e9e587..bfd882617613 100644
---- a/arch/x86/include/asm/xen/hypercall.h
-+++ b/arch/x86/include/asm/xen/hypercall.h
-@@ -44,6 +44,7 @@
- #include <asm/page.h>
- #include <asm/pgtable.h>
- #include <asm/smap.h>
-+#include <asm/nospec-branch.h>
-
- #include <xen/interface/xen.h>
- #include <xen/interface/sched.h>
-@@ -217,9 +218,9 @@ privcmd_call(unsigned call,
- __HYPERCALL_5ARG(a1, a2, a3, a4, a5);
-
- stac();
-- asm volatile("call *%[call]"
-+ asm volatile(CALL_NOSPEC
- : __HYPERCALL_5PARAM
-- : [call] "a" (&hypercall_page[call])
-+ : [thunk_target] "a" (&hypercall_page[call])
- : __HYPERCALL_CLOBBER5);
- clac();
-
---
-2.14.3
-
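Building on the macro sketch above, a hypothetical wrapper (x86-64 assumed,
not kernel code) shows the contract the one-line operand change in this
patch satisfies: CALL_NOSPEC references %[thunk_target] by name, so the
caller's constraint must carry exactly that name and keep the target in a
register. GP clobbers of the callee are listed; FP/vector state is ignored
in this sketch:

	/* Hypothetical helper illustrating the named-operand contract. */
	static inline void indirect_call(void (*fn)(void))
	{
		asm volatile(CALL_NOSPEC
			     : /* no outputs */
			     : THUNK_TARGET(fn)
			     : "rax", "rcx", "rdx", "rsi", "rdi",
			       "r8", "r9", "r10", "r11", "cc", "memory");
	}

Before the patch, privcmd_call() named its operand [call]; after it, the
name is [thunk_target], which is all the hunk needs to change.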
-From 96f71b3a482e918991d165eb7a6b42eb9a9ef735 Mon Sep 17 00:00:00 2001
-From: David Woodhouse <dwmw@amazon.co.uk>
-Date: Tue, 9 Jan 2018 14:43:15 +0000
-Subject: [PATCH 09/10] x86/retpoline/checksum32: Convert assembler indirect
- jumps
-
-Convert all indirect jumps in 32-bit checksum assembler code to use
-non-speculative sequences when CONFIG_RETPOLINE is enabled.
-
-Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Arjan van de Ven <arjan@linux.intel.com>
-Acked-by: Ingo Molnar <mingo@kernel.org>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Andi Kleen <ak@linux.intel.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/1515508997-6154-10-git-send-email-dwmw@amazon.co.uk
----
- arch/x86/lib/checksum_32.S | 7 ++++---
- 1 file changed, 4 insertions(+), 3 deletions(-)
-
-diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
-index 4d34bb548b41..46e71a74e612 100644
---- a/arch/x86/lib/checksum_32.S
-+++ b/arch/x86/lib/checksum_32.S
-@@ -29,7 +29,8 @@
- #include <asm/errno.h>
- #include <asm/asm.h>
- #include <asm/export.h>
--
-+#include <asm/nospec-branch.h>
-+
- /*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-@@ -156,7 +157,7 @@ ENTRY(csum_partial)
- negl %ebx
- lea 45f(%ebx,%ebx,2), %ebx
- testl %esi, %esi
-- jmp *%ebx
-+ JMP_NOSPEC %ebx
-
- # Handle 2-byte-aligned regions
- 20: addw (%esi), %ax
-@@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic)
- andl $-32,%edx
- lea 3f(%ebx,%ebx), %ebx
- testl %esi, %esi
-- jmp *%ebx
-+ JMP_NOSPEC %ebx
- 1: addl $64,%esi
- addl $64,%edi
- SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl)
---
-2.14.3
-
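Both JMP_NOSPEC here and CALL_NOSPEC earlier bounce the indirect branch
through a per-register thunk whose only job is to give the indirect branch
predictor nothing exploitable. The real thunks live in
arch/x86/lib/retpoline.S; below is a minimal, hypothetical user-space
rendition of the idea for x86-64 Linux (the demo_* names are invented;
build with gcc, ideally with -mno-red-zone since the inline asm performs a
call):

	#include <stdio.h>

	/* Architecturally jumps to the address in %rax; speculatively,
	 * the RSB predicts the final 'ret' returns to the pause/lfence
	 * loop, so speculation parks there instead of following a
	 * poisoned indirect-branch prediction. */
	asm(".globl demo_indirect_thunk_rax\n"
	    "demo_indirect_thunk_rax:\n"
	    "	call 1f\n"		/* push address of 2: as return */
	    "2:	pause\n"		/* speculation capture loop */
	    "	lfence\n"
	    "	jmp 2b\n"
	    "1:	mov %rax, (%rsp)\n"	/* overwrite return with target */
	    "	ret\n");		/* 'return' to the real target */

	void demo_indirect_thunk_rax(void);

	static void hello(void)
	{
		puts("reached via retpoline-style thunk");
	}

	int main(void)
	{
		void (*fn)(void) = hello;

		/* Equivalent of CALL_NOSPEC with the target in %rax. */
		asm volatile("call demo_indirect_thunk_rax"
			     : "+a" (fn)
			     :
			     : "rcx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "cc", "memory");
		return 0;
	}

An indirect jmp such as the jump-table dispatch patched here is handled
the same way, except JMP_NOSPEC emits a jmp to the thunk rather than a
call.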
-From 9080a45e302772c068f73bc24b3304a416fe2daf Mon Sep 17 00:00:00 2001
-From: Andi Kleen <ak@linux.intel.com>
-Date: Tue, 9 Jan 2018 14:43:16 +0000
-Subject: [PATCH 10/10] x86/retpoline/irq32: Convert assembler indirect jumps
-
-Convert all indirect jumps in 32-bit irq inline asm code to use
-non-speculative sequences.
-
-Signed-off-by: Andi Kleen <ak@linux.intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Arjan van de Ven <arjan@linux.intel.com>
-Acked-by: Ingo Molnar <mingo@kernel.org>
-Cc: gnomes@lxorguk.ukuu.org.uk
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Jiri Kosina <jikos@kernel.org>
-Cc: Andy Lutomirski <luto@amacapital.net>
-Cc: Dave Hansen <dave.hansen@intel.com>
-Cc: Kees Cook <keescook@google.com>
-Cc: Tim Chen <tim.c.chen@linux.intel.com>
-Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
-Cc: Paul Turner <pjt@google.com>
-Link: https://lkml.kernel.org/r/1515508997-6154-11-git-send-email-dwmw@amazon.co.uk
----
- arch/x86/kernel/irq_32.c | 9 +++++----
- 1 file changed, 5 insertions(+), 4 deletions(-)
-
-diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
-index a83b3346a0e1..c1bdbd3d3232 100644
---- a/arch/x86/kernel/irq_32.c
-+++ b/arch/x86/kernel/irq_32.c
-@@ -20,6 +20,7 @@
- #include <linux/mm.h>
-
- #include <asm/apic.h>
-+#include <asm/nospec-branch.h>
-
- #ifdef CONFIG_DEBUG_STACKOVERFLOW
-
-@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
- static void call_on_stack(void *func, void *stack)
- {
- asm volatile("xchgl %%ebx,%%esp \n"
-- "call *%%edi \n"
-+ CALL_NOSPEC
- "movl %%ebx,%%esp \n"
- : "=b" (stack)
- : "0" (stack),
-- "D"(func)
-+ [thunk_target] "D"(func)
- : "memory", "cc", "edx", "ecx", "eax");
- }
-
-@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
- call_on_stack(print_stack_overflow, isp);
-
- asm volatile("xchgl %%ebx,%%esp \n"
-- "call *%%edi \n"
-+ CALL_NOSPEC
- "movl %%ebx,%%esp \n"
- : "=a" (arg1), "=b" (isp)
- : "0" (desc), "1" (isp),
-- "D" (desc->handle_irq)
-+ [thunk_target] "D" (desc->handle_irq)
- : "memory", "cc", "ecx");
- return 1;
- }
---
-2.14.3
-
diff --git a/freed-ora/current/f27/sources b/freed-ora/current/f27/sources
index b0120913b..950f62aa3 100644
--- a/freed-ora/current/f27/sources
+++ b/freed-ora/current/f27/sources
@@ -1,3 +1,3 @@
SHA512 (linux-libre-4.14-gnu.tar.xz) = 0d4b0b8ec1ffc39c59295adf56f6a2cccf77cad56d8a8bf8072624bbb52ba3e684147ebed91d1528d2685423dd784c5fca0f3650f874f2b93cfc6b7689b9a87f
SHA512 (perf-man-4.14.tar.gz) = 76a9d8adc284cdffd4b3fbb060e7f9a14109267707ce1d03f4c3239cd70d8d164f697da3a0f90a363fbcac42a61d3c378afbcc2a86f112c501b9cb5ce74ef9f8
-SHA512 (patch-4.14-gnu-4.14.13-gnu.xz) = de1fa0d7cf58d2864dab397c3e8a4f1512a93619c36bacf155084a741d7b048e712c2df87711c2dd4d46d0a556e2c47c796ada76cf7c5eb4c8646e06dcade295
+SHA512 (patch-4.14-gnu-4.14.14-gnu.xz) = 65be470943aa3b3c93d518cf025130d7342c728fb22a9583b6d4b77a359d2c768d13ff60cc374d10411effbf8f89bf1d063c6129931bb4f2caa475954019eae0
diff --git a/freed-ora/current/f27/v4-KVM-Fix-stack-out-of-bounds-read-in-write_mmio.patch b/freed-ora/current/f27/v4-KVM-Fix-stack-out-of-bounds-read-in-write_mmio.patch
deleted file mode 100644
index f3767cda5..000000000
--- a/freed-ora/current/f27/v4-KVM-Fix-stack-out-of-bounds-read-in-write_mmio.patch
+++ /dev/null
@@ -1,215 +0,0 @@
-From patchwork Fri Dec 15 01:40:50 2017
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 8bit
-Subject: [v4] KVM: Fix stack-out-of-bounds read in write_mmio
-From: Wanpeng Li <kernellwp@gmail.com>
-X-Patchwork-Id: 10113513
-Message-Id: <1513302050-14253-1-git-send-email-wanpeng.li@hotmail.com>
-To: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
-Cc: Paolo Bonzini <pbonzini@redhat.com>,
- =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= <rkrcmar@redhat.com>,
- Wanpeng Li <wanpeng.li@hotmail.com>, Marc Zyngier <marc.zyngier@arm.com>,
- Christoffer Dall <christoffer.dall@linaro.org>
-Date: Thu, 14 Dec 2017 17:40:50 -0800
-
-From: Wanpeng Li <wanpeng.li@hotmail.com>
-
-Reported by syzkaller:
-
- BUG: KASAN: stack-out-of-bounds in write_mmio+0x11e/0x270 [kvm]
- Read of size 8 at addr ffff8803259df7f8 by task syz-executor/32298
-
- CPU: 6 PID: 32298 Comm: syz-executor Tainted: G OE 4.15.0-rc2+ #18
- Hardware name: LENOVO ThinkCentre M8500t-N000/SHARKBAY, BIOS FBKTC1AUS 02/16/2016
- Call Trace:
- dump_stack+0xab/0xe1
- print_address_description+0x6b/0x290
- kasan_report+0x28a/0x370
- write_mmio+0x11e/0x270 [kvm]
- emulator_read_write_onepage+0x311/0x600 [kvm]
- emulator_read_write+0xef/0x240 [kvm]
- emulator_fix_hypercall+0x105/0x150 [kvm]
- em_hypercall+0x2b/0x80 [kvm]
- x86_emulate_insn+0x2b1/0x1640 [kvm]
- x86_emulate_instruction+0x39a/0xb90 [kvm]
- handle_exception+0x1b4/0x4d0 [kvm_intel]
- vcpu_enter_guest+0x15a0/0x2640 [kvm]
- kvm_arch_vcpu_ioctl_run+0x549/0x7d0 [kvm]
- kvm_vcpu_ioctl+0x479/0x880 [kvm]
- do_vfs_ioctl+0x142/0x9a0
- SyS_ioctl+0x74/0x80
- entry_SYSCALL_64_fastpath+0x23/0x9a
-
-Patching a vmmcall writes the 3-byte opcode 0F 01 C1 (vmcall) to guest
-memory; however, the write_mmio tracepoint always prints 8 bytes through
-*(u64 *)val, since KVM splits the mmio access into 8-byte chunks. This
-can result in a stack-out-of-bounds read from accessing the extra 5
-bytes. This patch fixes it by accessing only the bytes we operate on.
-
-Before patch:
-
-syz-executor-5567 [007] .... 51370.561696: kvm_mmio: mmio write len 3 gpa 0x10 val 0x1ffff10077c1010f
-
-After patch:
-
-syz-executor-13416 [002] .... 51302.299573: kvm_mmio: mmio write len 3 gpa 0x10 val 0xc1010f
-
-Reported-by: Dmitry Vyukov <dvyukov@google.com>
-Cc: Paolo Bonzini <pbonzini@redhat.com>
-Cc: Radim Krčmář <rkrcmar@redhat.com>
-Cc: Marc Zyngier <marc.zyngier@arm.com>
-Cc: Christoffer Dall <christoffer.dall@linaro.org>
-Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
-Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
-Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
-Tested-by: Marc Zyngier <marc.zyngier@arm.com>
----
-v3 -> v4:
- * fix the arm tracepoint
-v2 -> v3:
- * fix sparse warning
-v1 -> v2:
- * do the memcpy in kvm_mmio tracepoint
-
- arch/x86/kvm/x86.c | 8 ++++----
- include/trace/events/kvm.h | 6 ++++--
- virt/kvm/arm/mmio.c | 6 +++---
- 3 files changed, 11 insertions(+), 9 deletions(-)
-
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 0f82e2c..c7071e7 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -4456,7 +4456,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
- addr, n, v))
- && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
- break;
-- trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
-+ trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
- handled += n;
- addr += n;
- len -= n;
-@@ -4715,7 +4715,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
- {
- if (vcpu->mmio_read_completed) {
- trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
-- vcpu->mmio_fragments[0].gpa, *(u64 *)val);
-+ vcpu->mmio_fragments[0].gpa, val);
- vcpu->mmio_read_completed = 0;
- return 1;
- }
-@@ -4737,14 +4737,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
-
- static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
- {
-- trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
-+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
- return vcpu_mmio_write(vcpu, gpa, bytes, val);
- }
-
- static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
- void *val, int bytes)
- {
-- trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
-+ trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
- return X86EMUL_IO_NEEDED;
- }
-
-diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
-index e4b0b8e..dfd2170 100644
---- a/include/trace/events/kvm.h
-+++ b/include/trace/events/kvm.h
-@@ -211,7 +211,7 @@ TRACE_EVENT(kvm_ack_irq,
- { KVM_TRACE_MMIO_WRITE, "write" }
-
- TRACE_EVENT(kvm_mmio,
-- TP_PROTO(int type, int len, u64 gpa, u64 val),
-+ TP_PROTO(int type, int len, u64 gpa, void *val),
- TP_ARGS(type, len, gpa, val),
-
- TP_STRUCT__entry(
-@@ -225,7 +225,9 @@ TRACE_EVENT(kvm_mmio,
- __entry->type = type;
- __entry->len = len;
- __entry->gpa = gpa;
-- __entry->val = val;
-+ __entry->val = 0;
-+ if (val)
-+ memcpy(&__entry->val, val, min(8, len));
- ),
-
- TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
-diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
-index b6e715f..dac7ceb 100644
---- a/virt/kvm/arm/mmio.c
-+++ b/virt/kvm/arm/mmio.c
-@@ -112,7 +112,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
- }
-
- trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-- data);
-+ &data);
- data = vcpu_data_host_to_guest(vcpu, data, len);
- vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
- }
-@@ -182,14 +182,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
- data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
- len);
-
-- trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
-+ trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
- kvm_mmio_write_buf(data_buf, len, data);
-
- ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
- data_buf);
- } else {
- trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
-- fault_ipa, 0);
-+ fault_ipa, NULL);
-
- ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
- data_buf);
-From patchwork Mon Dec 18 11:55:05 2017
-Content-Type: text/plain; charset="utf-8"
-MIME-Version: 1.0
-Content-Transfer-Encoding: 7bit
-Subject: [v4] KVM: Fix stack-out-of-bounds read in write_mmio
-From: Paolo Bonzini <pbonzini@redhat.com>
-X-Patchwork-Id: 10118879
-Message-Id: <17d27b8d-908b-a740-1d2d-e92a8507f25b@redhat.com>
-To: Marc Zyngier <marc.zyngier@arm.com>,
- Wanpeng Li <kernellwp@gmail.com>, linux-kernel@vger.kernel.org,
- kvm@vger.kernel.org
-Cc: Radim Krčmář <rkrcmar@redhat.com>,
- Wanpeng Li <wanpeng.li@hotmail.com>,
- Christoffer Dall <christoffer.dall@linaro.org>
-Date: Mon, 18 Dec 2017 12:55:05 +0100
-
-On 15/12/2017 12:06, Marc Zyngier wrote:
-> Assuming you address the above:
->
-> Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
-> Tested-by: Marc Zyngier <marc.zyngier@arm.com>
-
-Done as follows:
-
-
-Thanks,
-
-Paolo
-
-diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
-index dfd21708694f..0a016bd14c2d 100644
---- a/include/trace/events/kvm.h
-+++ b/include/trace/events/kvm.h
-@@ -227,7 +227,8 @@
- __entry->gpa = gpa;
- __entry->val = 0;
- if (val)
-- memcpy(&__entry->val, val, min(8, len));
-+ memcpy(&__entry->val, val,
-+ min_t(u32, sizeof(__entry->val), len));
- ),
-
- TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
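Taken together with Paolo's min_t() refinement just above, the net
behaviour of the fix is easy to reproduce stand-alone. A hypothetical
user-space sketch (not the tracepoint itself; little-endian assumed,
matching the 0xc1010f trace line in the changelog):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* What the TP_fast_assign now does, in miniature: copy at most
	 * sizeof(u64) bytes instead of type-punning the buffer as a u64. */
	static uint64_t mmio_trace_val(const void *val, uint32_t len)
	{
		uint64_t v = 0;

		/* The old code did 'v = *(const uint64_t *)val;', reading
		 * 8 bytes from a possibly 3-byte stack buffer -- the KASAN
		 * splat quoted in the changelog. */
		if (val)
			memcpy(&v, val, len < sizeof(v) ? len : sizeof(v));
		return v;
	}

	int main(void)
	{
		/* The 3-byte vmcall opcode from the report. */
		unsigned char vmcall[3] = { 0x0f, 0x01, 0xc1 };

		printf("mmio write len 3 val 0x%llx\n",
		       (unsigned long long)mmio_trace_val(vmcall,
							  sizeof(vmcall)));
		return 0;
	}

On little-endian x86 this prints val 0xc1010f, matching the "After patch"
trace line, with no out-of-bounds read for KASAN to flag.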