// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging performance test
 *
 * Based on dirty_log_test.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2020, Google, Inc.
 */
staticvoid arch_setup_vm(struct kvm_vm *vm, unsignedint nr_vcpus)
{ /* * The test can still run even if hardware does not support GICv3, as it * is only an optimization to reduce guest exits.
*/
gic_fd = vgic_v3_setup(vm, nr_vcpus, 64);
}
/*
	 * Keep running the guest while dirty logging is being disabled
	 * (iteration is negative) so that vCPUs are accessing memory
	 * for the entire duration of zapping collapsible SPTEs.
	 */
	while (current_iteration == READ_ONCE(iteration) &&
	       READ_ONCE(iteration) >= 0 && !READ_ONCE(host_quit)) {}
}
clock_gettime(CLOCK_MONOTONIC, &start);  /* timestamp start of the population phase */

	/* -1 marks "no iteration completed yet" for each vCPU. */
	for (i = 0; i < nr_vcpus; i++)
		vcpu_last_completed_iteration[i] = -1;

	/*
	 * Use 100% writes during the population phase to ensure all
	 * memory is actually populated and not just mapped to the zero
	 * page. This prevents expensive copy-on-write faults from
	 * occurring during the dirty memory iterations below, which
	 * would pollute the performance results.
	 */
	memstress_set_write_percent(vm, 100);
	memstress_set_random_access(vm, false);
	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);

	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %d - Populating\n", iteration);
	/* Spin until every vCPU has finished the population iteration. */
	for (i = 0; i < nr_vcpus; i++) {
		while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
		       iteration)
			;
	}
while (iteration < p->iterations) {
		/*
		 * Incrementing the iteration number will start the vCPUs
		 * dirtying memory again.
		 */
		clock_gettime(CLOCK_MONOTONIC, &start);
		iteration++;

		pr_debug("Starting iteration %d\n", iteration);
		/* Wait for every vCPU to complete this dirtying iteration. */
		for (i = 0; i < nr_vcpus; i++) {
			while (READ_ONCE(vcpu_last_completed_iteration[i])
			       != iteration)
				;
		}
		/*
		 * NOTE(review): the remainder of this loop body (dirty-log
		 * collection/clearing and its timing) is not visible in this
		 * chunk of the file.
		 */
/*
	 * Run vCPUs while dirty logging is being disabled to stress disabling
	 * in terms of both performance and correctness. Opt-in via command
	 * line as this significantly increases time to disable dirty logging.
	 */
	if (run_vcpus_while_disabling_dirty_logging)
		WRITE_ONCE(iteration, -1);

	/*
	 * Tell the vCPU threads to quit. No need to manually check that vCPUs
	 * have stopped running after disabling dirty logging, the join will
	 * wait for them to exit.
	 */
	host_quit = true;
	memstress_join_vcpu_threads(nr_vcpus);

	/* Report average time per KVM_GET_DIRTY_LOG iteration. */
	avg = timespec_div(get_dirty_log_total, p->iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
		p->iterations, get_dirty_log_total.tv_sec,
		get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);

	/* Clear-dirty-log stats only exist when manual protect was enabled. */
	if (dirty_log_manual_caps) {
		avg = timespec_div(clear_dirty_log_total, p->iterations);
		pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
			p->iterations, clear_dirty_log_total.tv_sec,
			clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
	}
/*
 * Print usage information for the test and terminate.
 *
 * @name: argv[0], printed in the usage line.
 *
 * Never returns: exits the process with status 0.
 *
 * Fixes vs. original: "static void" keywords were fused, and the usage
 * string was missing a space between "[-s mem type]" and "[-x memslots]",
 * printing "...mem type][-x memslots...".
 */
static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-a] [-i iterations] [-p offset] [-g] "
	       "[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-r random seed ] [-s mem type] "
	       "[-x memslots] [-w percentage] [-c physical cpus to run test on]\n", name);
	puts("");
	printf(" -a: access memory randomly rather than in order.\n");
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -g: Do not enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2. This\n"
	       " makes KVM_GET_DIRTY_LOG clear the dirty log (i.e.\n"
	       " KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is not enabled)\n"
	       " and writes will be tracked as soon as dirty logging is\n"
	       " enabled on the memslot (i.e. KVM_DIRTY_LOG_INITIALLY_SET\n"
	       " is not enabled).\n");
	printf(" -p: specify guest physical test memory offset\n"
	       " Warning: a low offset can conflict with the loaded test code.\n");
	guest_modes_help();
	printf(" -n: Run the vCPUs in nested mode (L2)\n");
	printf(" -e: Run vCPUs while dirty logging is being disabled. This\n"
	       " can significantly increase runtime, especially if there\n"
	       " isn't a dedicated pCPU for the main thread.\n");
	printf(" -b: specify the size of the memory region which should be\n"
	       " dirtied by each vCPU. e.g. 10M or 3G.\n"
	       " (default: 1G)\n");
	printf(" -v: specify the number of vCPUs to run.\n");
	printf(" -o: Overlap guest memory accesses instead of partitioning\n"
	       " them into a separate region of memory for each vCPU.\n");
	printf(" -r: specify the starting random seed.\n");
	backing_src_help("-s");
	printf(" -x: Split the memory region into this number of memslots.\n"
	       " (default: 1)\n");
	printf(" -w: specify the percentage of pages which should be written to\n"
	       " as an integer from 0-100 inclusive. This is probabilistic,\n"
	       " so -w X means each page has an X%% chance of writing\n"
	       " and a (100-X)%% chance of reading.\n"
	       " (default: 100 i.e. all pages are written to.)\n");
	kvm_print_vcpu_pinning_help();
	puts("");
	exit(0);
}
int main(int argc, char *argv[])
{ int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); constchar *pcpu_list = NULL; struct test_params p = {
.iterations = TEST_HOST_LOOP_N,
.partition_vcpu_memory_access = true,
.backing_src = DEFAULT_VM_MEM_SRC,
.slots = 1,
.write_percent = 100,
}; int opt;
/* Override the seed to be deterministic by default. */
guest_random_seed = 1;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.