Diffstat (limited to 'tools/testing/selftests/kvm/lib')
-rw-r--r--  tools/testing/selftests/kvm/lib/aarch64/processor.c | 18
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c          | 97
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util_internal.h |  1
-rw-r--r--  tools/testing/selftests/kvm/lib/ucall.c             | 36
-rw-r--r--  tools/testing/selftests/kvm/lib/x86_64/processor.c  | 20
5 files changed, 129 insertions(+), 43 deletions(-)
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index b6022e2f116e..e8c42506a09d 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -268,13 +268,20 @@ void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
switch (vm->mode) {
case VM_MODE_P52V48_4K:
- tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
- tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
- break;
+ TEST_ASSERT(false, "AArch64 does not support 4K sized pages "
+ "with 52-bit physical address ranges");
case VM_MODE_P52V48_64K:
tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
break;
+ case VM_MODE_P48V48_4K:
+ tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
+ tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
+ break;
+ case VM_MODE_P48V48_64K:
+ tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
+ tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
+ break;
case VM_MODE_P40V48_4K:
tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
@@ -305,7 +312,6 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
- fprintf(stream, "%*spstate: 0x%.16llx pc: 0x%.16llx\n",
- indent, "", pstate, pc);
-
+ fprintf(stream, "%*spstate: 0x%.16llx pc: 0x%.16llx\n",
+ indent, "", pstate, pc);
}
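For reference, the TG0/IPS bit encoding used in the hunk above can be summarized with a small helper. This is an illustrative sketch only; tcr_granule_and_ips() is a hypothetical name and is not part of the patch:

	/* Sketch; assumes <stdint.h> and <stdbool.h>. */
	static uint64_t tcr_granule_and_ips(bool use_64k_pages, unsigned int pa_bits)
	{
		uint64_t tcr = 0;

		tcr |= (use_64k_pages ? 1ul : 0ul) << 14;  /* TG0: 0 = 4KB, 1 = 64KB granule */

		switch (pa_bits) {
		case 40: tcr |= 2ul << 32; break;          /* IPS = 40 bits */
		case 48: tcr |= 5ul << 32; break;          /* IPS = 48 bits */
		case 52: tcr |= 6ul << 32; break;          /* IPS = 52 bits */
		}
		return tcr;
	}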
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 1b41e71283d5..4ca96b228e46 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -85,13 +85,18 @@ int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
return ret;
}
-static void vm_open(struct kvm_vm *vm, int perm)
+static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
{
vm->kvm_fd = open(KVM_DEV_PATH, perm);
if (vm->kvm_fd < 0)
exit(KSFT_SKIP);
- vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, NULL);
+ if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
+ fprintf(stderr, "immediate_exit not available, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
"rc: %i errno: %i", vm->fd, errno);
}
@@ -99,9 +104,13 @@ static void vm_open(struct kvm_vm *vm, int perm)
const char * const vm_guest_mode_string[] = {
"PA-bits:52, VA-bits:48, 4K pages",
"PA-bits:52, VA-bits:48, 64K pages",
+ "PA-bits:48, VA-bits:48, 4K pages",
+ "PA-bits:48, VA-bits:48, 64K pages",
"PA-bits:40, VA-bits:48, 4K pages",
"PA-bits:40, VA-bits:48, 64K pages",
};
+_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
+ "Missing new mode strings?");
/*
* VM Create
@@ -122,7 +131,8 @@ const char * const vm_guest_mode_string[] = {
* descriptor to control the created VM is created with the permissions
* given by perm (e.g. O_RDWR).
*/
-struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
+struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages,
+ int perm, unsigned long type)
{
struct kvm_vm *vm;
int kvm_fd;
@@ -131,22 +141,38 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
TEST_ASSERT(vm != NULL, "Insufficient Memory");
vm->mode = mode;
- vm_open(vm, perm);
+ vm->type = type;
+ vm_open(vm, perm, type);
/* Setup mode specific traits. */
switch (vm->mode) {
case VM_MODE_P52V48_4K:
vm->pgtable_levels = 4;
+ vm->pa_bits = 52;
+ vm->va_bits = 48;
vm->page_size = 0x1000;
vm->page_shift = 12;
- vm->va_bits = 48;
break;
case VM_MODE_P52V48_64K:
vm->pgtable_levels = 3;
vm->pa_bits = 52;
+ vm->va_bits = 48;
vm->page_size = 0x10000;
vm->page_shift = 16;
+ break;
+ case VM_MODE_P48V48_4K:
+ vm->pgtable_levels = 4;
+ vm->pa_bits = 48;
vm->va_bits = 48;
+ vm->page_size = 0x1000;
+ vm->page_shift = 12;
+ break;
+ case VM_MODE_P48V48_64K:
+ vm->pgtable_levels = 3;
+ vm->pa_bits = 48;
+ vm->va_bits = 48;
+ vm->page_size = 0x10000;
+ vm->page_shift = 16;
break;
case VM_MODE_P40V48_4K:
vm->pgtable_levels = 4;
@@ -186,6 +212,11 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
return vm;
}
+struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
+{
+ return _vm_create(mode, phy_pages, perm, 0);
+}
+
/*
* VM Restart
*
@@ -203,7 +234,7 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
struct userspace_mem_region *region;
- vm_open(vmp, perm);
+ vm_open(vmp, perm, vmp->type);
if (vmp->has_irqchip)
vm_create_irqchip(vmp);
@@ -231,6 +262,19 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
strerror(-ret));
}
+void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
+ uint64_t first_page, uint32_t num_pages)
+{
+ struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
+ .first_page = first_page,
+ .num_pages = num_pages };
+ int ret;
+
+ ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
+ TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
+ strerror(-ret));
+}
+
/*
* Userspace Memory Region Find
*
@@ -532,7 +576,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
* already exist.
*/
region = (struct userspace_mem_region *) userspace_mem_region_find(
- vm, guest_paddr, guest_paddr + npages * vm->page_size);
+ vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
if (region != NULL)
TEST_ASSERT(false, "overlapping userspace_mem_region already "
"exists\n"
@@ -548,15 +592,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
region = region->next) {
if (region->region.slot == slot)
break;
- if ((guest_paddr <= (region->region.guest_phys_addr
- + region->region.memory_size))
- && ((guest_paddr + npages * vm->page_size)
- >= region->region.guest_phys_addr))
- break;
}
if (region != NULL)
TEST_ASSERT(false, "A mem region with the requested slot "
- "or overlapping physical memory range already exists.\n"
+ "already exists.\n"
" requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
" existing slot: %u paddr: 0x%lx size: 0x%lx",
slot, guest_paddr, npages,
@@ -1087,6 +1126,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
return rc;
}
+void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ int ret;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+
+ vcpu->state->immediate_exit = 1;
+ ret = ioctl(vcpu->fd, KVM_RUN, NULL);
+ vcpu->state->immediate_exit = 0;
+
+ TEST_ASSERT(ret == -1 && errno == EINTR,
+ "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
+ ret, errno);
+}
+
/*
* VM VCPU Set MP State
*
@@ -1270,14 +1325,24 @@ int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
unsigned long cmd, void *arg)
{
+ int ret;
+
+ ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
+ TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
+ cmd, ret, errno, strerror(errno));
+}
+
+int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
+ unsigned long cmd, void *arg)
+{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
ret = ioctl(vcpu->fd, cmd, arg);
- TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
- cmd, ret, errno, strerror(errno));
+
+ return ret;
}
/*
@@ -1422,7 +1487,7 @@ const char *exit_reason_str(unsigned int exit_reason)
*
* Within the VM specified by vm, locates a range of available physical
* pages at or above paddr_min. If found, the pages are marked as in use
- * and thier base address is returned. A TEST_ASSERT failure occurs if
+ * and their base address is returned. A TEST_ASSERT failure occurs if
* not enough pages are available at or above paddr_min.
*/
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
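A rough usage sketch of the helpers added above. kvm_vm_get_dirty_log(), kvm_vm_clear_dirty_log() and vcpu_run() come from this library (kvm_util.h); the clear_and_rerun() wrapper below is hypothetical:

	/* Sketch only; assumes kvm_util.h from this selftests library. */
	static void clear_and_rerun(struct kvm_vm *vm, uint32_t vcpuid, int slot,
				    void *bitmap, uint32_t npages)
	{
		/* Fetch the dirty bitmap for the slot... */
		kvm_vm_get_dirty_log(vm, slot, bitmap);

		/* ...clear (and re-write-protect) those pages... */
		kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, npages);

		/* ...then let the guest run and dirty memory again. */
		vcpu_run(vm, vcpuid);
	}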
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index 52701db0f253..4595e42c6e29 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -44,6 +44,7 @@ struct vcpu {
struct kvm_vm {
int mode;
+ unsigned long type;
int kvm_fd;
int fd;
unsigned int pgtable_levels;
diff --git a/tools/testing/selftests/kvm/lib/ucall.c b/tools/testing/selftests/kvm/lib/ucall.c
index 4777f9bb5194..a2ab38be2f47 100644
--- a/tools/testing/selftests/kvm/lib/ucall.c
+++ b/tools/testing/selftests/kvm/lib/ucall.c
@@ -34,7 +34,8 @@ void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
return;
if (type == UCALL_MMIO) {
- vm_paddr_t gpa, start, end, step;
+ vm_paddr_t gpa, start, end, step, offset;
+ unsigned bits;
bool ret;
if (arg) {
@@ -45,25 +46,30 @@ void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
}
/*
- * Find an address within the allowed virtual address space,
- * that does _not_ have a KVM memory region associated with it.
- * Identity mapping an address like this allows the guest to
+ * Find an address within the allowed physical and virtual address
+ * spaces, that does _not_ have a KVM memory region associated with
+ * it. Identity mapping an address like this allows the guest to
* access it, but as KVM doesn't know what to do with it, it
* will assume it's something userspace handles and exit with
* KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
- * Here we start with a guess that the addresses around two
- * thirds of the VA space are unmapped and then work both down
- * and up from there in 1/6 VA space sized steps.
+ * Here we start with a guess that the addresses around 5/8th
+ * of the allowed space are unmapped and then work both down and
+ * up from there in 1/16th allowed space sized steps.
+ *
+ * Note, we need to use VA-bits - 1 when calculating the allowed
+ * virtual address space for an identity mapping because the upper
+ * half of the virtual address space is the two's complement of the
+ * lower and won't match physical addresses.
*/
- start = 1ul << (vm->va_bits * 2 / 3);
- end = 1ul << vm->va_bits;
- step = 1ul << (vm->va_bits / 6);
- for (gpa = start; gpa >= 0; gpa -= step) {
- if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
+ bits = vm->va_bits - 1;
+ bits = vm->pa_bits < bits ? vm->pa_bits : bits;
+ end = 1ul << bits;
+ start = end * 5 / 8;
+ step = end / 16;
+ for (offset = 0; offset < end - start; offset += step) {
+ if (ucall_mmio_init(vm, start - offset))
return;
- }
- for (gpa = start + step; gpa < end; gpa += step) {
- if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
+ if (ucall_mmio_init(vm, start + offset))
return;
}
TEST_ASSERT(false, "Can't find a ucall mmio address");
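As a worked example of the probe arithmetic in the hunk above, assuming a PA:40/VA:48 configuration (standalone illustration, not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned int va_bits = 48, pa_bits = 40;
		unsigned int bits = pa_bits < va_bits - 1 ? pa_bits : va_bits - 1;
		uint64_t end = 1ul << bits;	/* 0x100_0000_0000 */
		uint64_t start = end * 5 / 8;	/* 0xa0_0000_0000: first guess */
		uint64_t step = end / 16;	/* 0x10_0000_0000: probe stride */

		assert(start == 0xa000000000ul && step == 0x1000000000ul);
		return 0;
	}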
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index f28127f4a3af..dc7fae9fa424 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1030,6 +1030,14 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
nested_size, sizeof(state->nested_));
}
+ /*
+ * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
+ * guest state is consistent only after userspace re-enters the
+ * kernel with KVM_RUN. Complete IO prior to migrating state
+ * to a new VM.
+ */
+ vcpu_run_complete_io(vm, vcpuid);
+
nmsrs = kvm_get_num_msrs(vm);
list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
list->nmsrs = nmsrs;
@@ -1093,12 +1101,6 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int r;
- if (state->nested.size) {
- r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
- r);
- }
-
r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
r);
@@ -1130,4 +1132,10 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
r);
+
+ if (state->nested.size) {
+ r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
+ r);
+ }
}
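A minimal sketch of the save/restore flow the x86_64 changes above target. vcpu_save_state() and vcpu_load_state() are this library's helpers; the migrate_vcpu() wrapper and the already-created destination VM are assumptions for illustration:

	/* Sketch only; assumes <stdlib.h>, kvm_util.h and x86_64/processor.h. */
	static void migrate_vcpu(struct kvm_vm *src_vm, struct kvm_vm *dst_vm,
				 uint32_t vcpuid)
	{
		struct kvm_x86_state *state;

		/*
		 * vcpu_save_state() now calls vcpu_run_complete_io(), so any
		 * pending KVM_EXIT_IO is finished before registers are read.
		 */
		state = vcpu_save_state(src_vm, vcpuid);

		/*
		 * vcpu_load_state() now applies KVM_SET_NESTED_STATE last,
		 * after the general-purpose registers have been restored.
		 */
		vcpu_load_state(dst_vm, vcpuid, state);

		free(state);
	}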