author    Linus Torvalds <torvalds@linux-foundation.org>  2017-03-04 11:36:19 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-03-04 11:36:19 -0800
commit    2d62e0768d3c28536d4cfe4c40ba1e5e8e442a93
tree      333f8cbcdb3b650813d758711a9e4ceee7b6fbce /arch/x86
parent    be834aafdf5f8a37c191e697ac8ee6d53ab5020c
parent    16ce771b93ab569490fd27415694132a7ade0d79
Merge tag 'kvm-4.11-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Radim Krčmář:
 "Second batch of KVM changes for the 4.11 merge window:

  PPC:
   - correct assumption about ASDR on POWER9
   - fix MMIO emulation on POWER9

  x86:
   - add a simple test for ioperm
   - cleanup TSS (going through KVM tree as the whole undertaking was
     caused by VMX's use of TSS)
   - fix nVMX interrupt delivery
   - fix some performance counters in the guest

  ... and two cleanup patches"

* tag 'kvm-4.11-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: nVMX: Fix pending events injection
  x86/kvm/vmx: remove unused variable in segment_base()
  selftests/x86: Add a basic selftest for ioperm
  x86/asm: Tidy up TSS limit code
  kvm: convert kvm.users_count from atomic_t to refcount_t
  KVM: x86: never specify a sample period for virtualized in_tx_cp counters
  KVM: PPC: Book3S HV: Don't use ASDR for real-mode HPT faults on POWER9
  KVM: PPC: Book3S HV: Fix software walk of guest process page tables
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/desc.h  | 18
-rw-r--r--  arch/x86/kernel/ioport.c     |  8
-rw-r--r--  arch/x86/kernel/process.c    |  6
-rw-r--r--  arch/x86/kvm/pmu.c           | 13
-rw-r--r--  arch/x86/kvm/vmx.c           |  9
5 files changed, 37 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index cb8f9149f6c8..1548ca92ad3f 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -205,6 +205,8 @@ static inline void native_load_tr_desc(void)
asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}
+DECLARE_PER_CPU(bool, __tss_limit_invalid);
+
static inline void force_reload_TR(void)
{
struct desc_struct *d = get_cpu_gdt_table(smp_processor_id());
@@ -220,18 +222,20 @@ static inline void force_reload_TR(void)
write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);
load_TR_desc();
+ this_cpu_write(__tss_limit_invalid, false);
}
-DECLARE_PER_CPU(bool, need_tr_refresh);
-
-static inline void refresh_TR(void)
+/*
+ * Call this if you need the TSS limit to be correct, which should be the case
+ * if and only if you have TIF_IO_BITMAP set or you're switching to a task
+ * with TIF_IO_BITMAP set.
+ */
+static inline void refresh_tss_limit(void)
{
DEBUG_LOCKS_WARN_ON(preemptible());
- if (unlikely(this_cpu_read(need_tr_refresh))) {
+ if (unlikely(this_cpu_read(__tss_limit_invalid)))
force_reload_TR();
- this_cpu_write(need_tr_refresh, false);
- }
}
/*
@@ -250,7 +254,7 @@ static inline void invalidate_tss_limit(void)
if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
force_reload_TR();
else
- this_cpu_write(need_tr_refresh, true);
+ this_cpu_write(__tss_limit_invalid, true);
}
static inline void native_load_gdt(const struct desc_ptr *dtr)
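The desc.h hunks above rename the lazy-reload flag to __tss_limit_invalid and fold the
flag clearing into force_reload_TR(), so refresh_tss_limit() only pays for the LTR reload
when the limit is actually stale. A standalone sketch of that invalidate/refresh pattern
follows; all names and the printf stand-in are illustrative, not kernel code:

#include <stdbool.h>
#include <stdio.h>

static bool tss_limit_invalid;    /* models the per-CPU __tss_limit_invalid flag */

static void force_reload_tr(void) /* models the expensive task-register reload */
{
	printf("reloading task register\n");
	tss_limit_invalid = false;  /* reload always leaves the limit correct */
}

static void invalidate_tss_limit(bool task_has_io_bitmap)
{
	if (task_has_io_bitmap)
		force_reload_tr();        /* limit must stay correct right now */
	else
		tss_limit_invalid = true; /* otherwise defer the costly reload */
}

static void refresh_tss_limit(void)
{
	if (tss_limit_invalid)            /* pay the cost only on demand */
		force_reload_tr();
}

int main(void)
{
	invalidate_tss_limit(false);      /* cheap: just sets the flag */
	refresh_tss_limit();              /* reloads once, lazily */
	refresh_tss_limit();              /* no-op: limit already valid */
	return 0;
}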
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index ca49bab3e467..9c3cf0944bce 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -48,8 +48,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
t->io_bitmap_ptr = bitmap;
set_thread_flag(TIF_IO_BITMAP);
+ /*
+ * Now that we have an IO bitmap, we need our TSS limit to be
+ * correct. It's fine if we are preempted after doing this:
+ * with TIF_IO_BITMAP set, context switches will keep our TSS
+ * limit correct.
+ */
preempt_disable();
- refresh_TR();
+ refresh_tss_limit();
preempt_enable();
}
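The pull also adds a basic ioperm selftest (outside this arch/x86 diffstat). As a minimal
illustration of the userspace path that lands in sys_ioperm() above — this is not the
selftest itself, just a sketch — a caller needs CAP_SYS_RAWIO and looks like:

/* Build on x86 Linux; run as root. */
#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	/* Ask for access to one I/O port: 0x80, the POST/debug port. */
	if (ioperm(0x80, 1, 1) != 0) {
		perror("ioperm");
		return 1;
	}
	outb(0, 0x80);       /* harmless write; 0x80 is also used for I/O delays */
	ioperm(0x80, 1, 0);  /* drop the permission again */
	return 0;
}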
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 56b059486c3b..f67591561711 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -69,8 +69,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
-DEFINE_PER_CPU(bool, need_tr_refresh);
-EXPORT_PER_CPU_SYMBOL_GPL(need_tr_refresh);
+DEFINE_PER_CPU(bool, __tss_limit_invalid);
+EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
/*
* this gets called so that we can store lazy state into memory and copy the
@@ -222,7 +222,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
* Make sure that the TSS limit is correct for the CPU
* to notice the IO bitmap.
*/
- refresh_TR();
+ refresh_tss_limit();
} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
/*
* Clear any possible leftover bits:
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 06ce377dcbc9..026db42a86c3 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -113,12 +113,19 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
.config = config,
};
+ attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+
if (in_tx)
attr.config |= HSW_IN_TX;
- if (in_tx_cp)
+ if (in_tx_cp) {
+ /*
+ * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
+ * period. Just clear the sample period so at least
+ * allocating the counter doesn't fail.
+ */
+ attr.sample_period = 0;
attr.config |= HSW_IN_TX_CHECKPOINTED;
-
- attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+ }
event = perf_event_create_kernel_counter(&attr, -1, current,
intr ? kvm_perf_overflow_intr :
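The hoisted assignment above sets the sample period to the distance from the current
counter value to overflow: for an N-bit counter, (-counter) & mask is exactly the number
of events until the counter wraps to zero. A small worked check of that arithmetic, with
illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = (1ULL << 48) - 1;  /* e.g. a 48-bit counter */
	uint64_t counter = mask - 99;      /* 100 events away from wrapping */
	uint64_t period = (-counter) & mask;

	/* Unsigned negation is modular, so this prints: period = 100 */
	printf("period = %llu\n", (unsigned long long)period);
	return 0;
}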
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ef4ba71dbb66..283aa8601833 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2053,7 +2053,6 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
static unsigned long segment_base(u16 selector)
{
struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
- struct desc_struct *d;
struct desc_struct *table;
unsigned long v;
@@ -10642,6 +10641,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (vcpu->arch.exception.pending ||
+ vcpu->arch.nmi_injected ||
+ vcpu->arch.interrupt.pending)
+ return -EBUSY;
+
if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
vmx->nested.preemption_timer_expired) {
if (vmx->nested.nested_run_pending)
@@ -10651,8 +10655,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
}
if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
- if (vmx->nested.nested_run_pending ||
- vcpu->arch.interrupt.pending)
+ if (vmx->nested.nested_run_pending)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
NMI_VECTOR | INTR_TYPE_NMI_INTR |
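The new early return in vmx_check_nested_events() above enforces event priority: anything
already queued for injection (a pending exception, an injected NMI, or a pending
interrupt) must be delivered before a nested VM exit is synthesized, which also lets the
later NMI branch drop its now-redundant interrupt.pending check. A minimal sketch of that
priority test, using illustrative stand-in types rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct vcpu_events {               /* stand-in for the relevant vcpu->arch fields */
	bool exception_pending;
	bool nmi_injected;
	bool interrupt_pending;
};

/* Mirrors the early return: any already-queued event must be injected first. */
static bool must_finish_injection_first(const struct vcpu_events *ev)
{
	return ev->exception_pending || ev->nmi_injected ||
	       ev->interrupt_pending;
}

int main(void)
{
	struct vcpu_events ev = { .nmi_injected = true };

	if (must_finish_injection_first(&ev))
		printf("would return -EBUSY: inject the queued event first\n");
	return 0;
}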