author	Eric Dumazet <edumazet@google.com>	2018-03-23 14:58:17 -0700
committer	Thomas Gleixner <tglx@linutronix.de>	2018-03-27 12:01:47 +0200
commit	07cde313b2d21f728cec2836db7cdb55476f7a26 (patch)
tree	88e450ffab9354ddc069700898fb9d7d21c192b3 /arch/x86/lib
parent	13cc36d76bc4f5a9801ae32630bc8240ba0cc522 (diff)
x86/msr: Allow rdmsr_safe_on_cpu() to schedule
High latencies can be observed, caused by a daemon periodically reading
various MSRs on all CPUs. On KASAN-enabled kernels, ~10ms latencies can be
observed simply by reading one MSR. Even without KASAN, sending an IPI to a
CPU which is in a deep sleep state or in a long hard-IRQ-disabled section,
and waiting for the answer, can consume hundreds of microseconds.

All usage sites are in preemptible context; convert rdmsr_safe_on_cpu() to
use a completion instead of busy polling.

Overall daemon CPU usage was reduced by 35%, and the latencies caused by
msr_read() disappeared.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Link: https://lkml.kernel.org/r/20180323215818.127774-1-edumazet@google.com
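The win comes from letting the calling task sleep while the target CPU
services the IPI. As a rough sketch (not part of the patch: the MSR choice
and the module wrapper below are invented for illustration), a daemon-style
reader of the kind the changelog describes looks like this; after this
change it blocks in wait_for_completion() inside rdmsr_safe_on_cpu()
instead of busy-polling:

/*
 * Hypothetical sketch only: sample one MSR on every online CPU from
 * preemptible (process) context.  MSR_IA32_TSC and the module wrapper
 * are illustrative assumptions, not part of this patch.
 */
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/msr.h>

static int __init msr_sample_init(void)
{
	unsigned int cpu;
	u32 lo, hi;
	int err;

	for_each_online_cpu(cpu) {
		/* With this patch the wait can schedule() while a
		 * deep-idle CPU wakes up to answer the IPI. */
		err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TSC, &lo, &hi);
		if (err)
			pr_warn("cpu%u: rdmsr failed: %d\n", cpu, err);
		else
			pr_info("cpu%u: tsc=%#llx\n", cpu,
				((u64)hi << 32) | lo);
	}
	return 0;
}
module_init(msr_sample_init);
MODULE_LICENSE("GPL");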
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--	arch/x86/lib/msr-smp.c	32
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index 693cce0be82d..761ba062afda 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -2,6 +2,7 @@
 #include <linux/export.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
+#include <linux/completion.h>
 #include <asm/msr.h>
 
 static void __rdmsr_on_cpu(void *info)
@@ -143,13 +144,19 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
 }
 EXPORT_SYMBOL(wrmsr_on_cpus);
 
+struct msr_info_completion {
+	struct msr_info		msr;
+	struct completion	done;
+};
+
 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
 static void __rdmsr_safe_on_cpu(void *info)
 {
-	struct msr_info *rv = info;
+	struct msr_info_completion *rv = info;
 
-	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
+	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
+	complete(&rv->done);
 }
 
 static void __wrmsr_safe_on_cpu(void *info)
@@ -161,17 +168,26 @@ static void __wrmsr_safe_on_cpu(void *info)
 
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
+	struct msr_info_completion rv;
+	call_single_data_t csd = {
+		.func	= __rdmsr_safe_on_cpu,
+		.info	= &rv,
+	};
 	int err;
-	struct msr_info rv;
 
 	memset(&rv, 0, sizeof(rv));
+	init_completion(&rv.done);
+	rv.msr.msr_no = msr_no;
 
-	rv.msr_no = msr_no;
-	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
-	*l = rv.reg.l;
-	*h = rv.reg.h;
+	err = smp_call_function_single_async(cpu, &csd);
+	if (!err) {
+		wait_for_completion(&rv.done);
+		err = rv.msr.err;
+	}
+	*l = rv.msr.reg.l;
+	*h = rv.msr.reg.h;
 
-	return err ? err : rv.err;
+	return err;
 }
 EXPORT_SYMBOL(rdmsr_safe_on_cpu);
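For reference, the synchronization idiom the patch adopts is the generic
async-IPI-plus-completion pattern: smp_call_function_single_async() only
queues the call and returns, so the caller needs its own way to wait, and
a completion lets it sleep rather than spin. A stripped-down, hypothetical
restatement of the pattern (remote_work, remote_fn and run_on_cpu are
invented names, not kernel APIs):

#include <linux/completion.h>
#include <linux/smp.h>

/* Invented stand-in for msr_info: a result plus a completion to wait on. */
struct remote_work {
	int			result;
	struct completion	done;
};

/* Runs on the target CPU in hard-IRQ context: must not sleep; does the
 * work and then wakes the waiter. */
static void remote_fn(void *info)
{
	struct remote_work *w = info;

	w->result = 0;	/* ... the actual per-CPU operation goes here ... */
	complete(&w->done);
}

static int run_on_cpu(int cpu)
{
	struct remote_work w;
	call_single_data_t csd = {
		.func	= remote_fn,
		.info	= &w,
	};
	int err;

	init_completion(&w.done);

	/* Queue the IPI; this returns as soon as the call is posted. */
	err = smp_call_function_single_async(cpu, &csd);
	if (err)
		return err;

	/* Sleep until remote_fn() fires the completion, instead of the
	 * busy-wait that smp_call_function_single(..., wait=1) does. */
	wait_for_completion(&w.done);
	return w.result;
}

Keeping csd and the work item on the stack is safe here for the same reason
it is in the patch: run_on_cpu() cannot return before wait_for_completion()
guarantees the remote handler has finished touching them.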