/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
/* Per-vCPU bookkeeping for one worker thread of the test. */
struct vcpu_thread {
	/* The index of the vCPU. */
	int vcpu_idx;

	/* The pthread backing the vCPU. */
	pthread_t thread;

	/* Set to true once the vCPU thread is up and running. */
	bool running;
};
/* The vCPU threads involved in this test. */ staticstruct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];
/* The function run by each vCPU thread, as provided by the test. */ staticvoid (*vcpu_thread_fn)(struct memstress_vcpu_args *);
/* Set to true once all vCPU threads are up and running. */ staticbool all_vcpu_threads_running;
staticstruct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
/*
 * Continuously write to the first 8 bytes of each page in the
 * specified region.
 *
 * NOTE(review): this chunk appears to be a garbled merge of two
 * functions. The signature and local declarations below match the
 * guest-code routine, but from the write_percent assignment onward the
 * statements belong to the VM-creation path: they reference mode,
 * vcpu_memory_bytes, slots, guest_num_pages, slot0_pages, vm,
 * region_end_gfn and nr_vcpus, none of which are declared in this
 * function, and no closing brace for the function is visible before
 * the next definition. Reconcile against the original file before
 * relying on this body; code tokens are preserved verbatim.
 */
void memstress_guest_code(uint32_t vcpu_idx)
{ struct memstress_args *args = &memstress_args; struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx]; struct guest_random_state rand_state;
uint64_t gva;
uint64_t pages;
uint64_t addr;
uint64_t page; int i;

/* By default vCPUs will write to memory. */
args->write_percent = 100;

/*
 * Snapshot the non-huge page size. This is used by the guest code to
 * access/dirty pages at the logging granularity.
 */
args->guest_page_size = vm_guest_mode_params[mode].page_size;

/* Per-vCPU memory must be an exact multiple of both page sizes. */
TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0, "Guest memory size is not host page size aligned.");
TEST_ASSERT(vcpu_memory_bytes % args->guest_page_size == 0, "Guest memory size is not guest page size aligned.");
TEST_ASSERT(guest_num_pages % slots == 0, "Guest memory cannot be evenly divided into %d slots.",
slots);

/*
 * If using nested, allocate extra pages for the nested page tables and
 * in-memory data structures.
 */
if (args->nested)
slot0_pages += memstress_nested_pages(nr_vcpus);

/*
 * Pass guest_num_pages to populate the page tables for test memory.
 * The memory is also added to memslot 0, but that's a benign side
 * effect as KVM allows aliasing HVAs in memslots.
 */
vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus,
slot0_pages + guest_num_pages,
memstress_guest_code, vcpus);
args->vm = vm;

/* Put the test region at the top guest physical memory. */
region_end_gfn = vm->max_gfn + 1;

#ifdef __x86_64__
/*
 * When running vCPUs in L2, restrict the test region to 48 bits to
 * avoid needing 5-level page tables to identity map L2.
 */
if (args->nested)
region_end_gfn = min(region_end_gfn, (1UL << 48) / args->guest_page_size);
#endif

/*
 * If there should be more memory in the guest test region than there
 * can be pages in the guest, it will definitely cause problems.
 */
TEST_ASSERT(guest_num_pages < region_end_gfn, "Requested more guest memory than address space allows.\n" " guest pages: %" PRIx64 " max gfn: %" PRIx64 " nr_vcpus: %d wss: %" PRIx64 "]",
guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes);
/*
 * Weak default for architectures without nested-virtualization support
 * in this test; architecture code overrides it. Reports that nesting
 * is unavailable and skips the test (does not return).
 *
 * Fix: corrected the grammar of the user-facing message
 * ("not support" -> "not supported").
 */
void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
{
	pr_info("%s() not supported on this architecture, skipping.\n", __func__);
	exit(KSFT_SKIP);
}
/*
 * NOTE(review): orphaned fragment of a vCPU worker-thread entry
 * function — the opening signature (which declares vcpu and vcpu_idx)
 * is not visible in this chunk. Preserved verbatim; reconcile with the
 * original file.
 */
/* Pin this thread to its assigned physical CPU if the test asked for it. */
if (memstress_args.pin_vcpus)
pin_self_to_cpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);

/* Announce that this vCPU thread is up; read elsewhere via READ_ONCE. */
WRITE_ONCE(vcpu->running, true);

/*
 * Wait for all vCPU threads to be up and running before calling the test-
 * provided vCPU thread function. This prevents thread creation (which
 * requires taking the mmap_sem in write mode) from interfering with the
 * guest faulting in its memory.
 */
while (!READ_ONCE(all_vcpu_threads_running))
;
/*
 * NOTE(review): the following is website boilerplate (originally in
 * German) that was accidentally pasted into this file; it is not part
 * of the program. Preserved here, translated, as a comment so the file
 * stays compilable:
 *
 * "The information on this web page has been compiled carefully and to
 * the best of our knowledge. However, neither the completeness, nor the
 * correctness, nor the quality of the information provided is
 * guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */