Merge branch kvm/selftests/memslot into kvmarm-master/next

* kvm/selftests/memslot:
  : .
  : Enable KVM memslot selftests on arm64, making them less
  : x86 specific.
  : .
  KVM: selftests: Build the memslot tests for arm64
  KVM: selftests: Make memslot_perf_test arch independent

Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
Marc Zyngier 2021-10-21 11:40:03 +01:00
commit 5a2acbbb01
2 changed files with 36 additions and 22 deletions

View file

@@ -96,6 +96,8 @@ TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
 TEST_GEN_PROGS_aarch64 += kvm_page_table_test
+TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test
+TEST_GEN_PROGS_aarch64 += memslot_perf_test
 TEST_GEN_PROGS_aarch64 += rseq_test
 TEST_GEN_PROGS_aarch64 += set_memory_region_test
 TEST_GEN_PROGS_aarch64 += steal_time

View file

@@ -127,43 +127,54 @@ static bool verbose;
 		pr_info(__VA_ARGS__);	\
 	} while (0)
/*
 * Validate one MMIO exit observed by the vCPU worker.
 *
 * Fails the test (via TEST_ASSERT) unless:
 *  - MMIO exits are currently expected (vm->mmio_ok is set),
 *  - the access is a write,
 *  - the access is exactly 8 bytes wide,
 *  - the guest physical address lies within the inclusive window
 *    [vm->mmio_gpa_min, vm->mmio_gpa_max].
 *
 * The assertion order determines which failure is reported first when
 * several conditions are violated at once.
 */
static void check_mmio_access(struct vm_data *vm, struct kvm_run *run)
{
TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
TEST_ASSERT(run->mmio.len == 8,
"Unexpected exit mmio size = %u", run->mmio.len);
TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
run->mmio.phys_addr <= vm->mmio_gpa_max,
"Unexpected exit mmio address = 0x%llx",
run->mmio.phys_addr);
}
static void *vcpu_worker(void *data)
{
	struct vm_data *vm = data;
	struct kvm_run *run;
	struct ucall uc;

	run = vcpu_state(vm->vm, VCPU_ID);
	while (1) {
		vcpu_run(vm->vm, VCPU_ID);

		switch (get_ucall(vm->vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == 0,
				"Unexpected sync ucall, got %lx",
				(ulong)uc.args[1]);
			sem_post(&vcpu_ready);
			continue;
		case UCALL_NONE:
			if (run->exit_reason == KVM_EXIT_MMIO)
				check_mmio_access(vm, run);
			else
				goto done;
			break;
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld, val = %lu",
				  (const char *)uc.args[0],
				  __FILE__, uc.args[1], uc.args[2]);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	return NULL;
}
@@ -268,6 +279,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
 	TEST_ASSERT(data->hva_slots, "malloc() fail");

 	data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
+	ucall_init(data->vm, NULL);

 	pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
 		  max_mem_slots - 1, data->pages_per_slot, rempages);