/*
 * Fills in gva_list starting from offset. Returns the number of items added.
 *
 * Each entry encodes a page-aligned GVA in the upper bits; the lower 12 bits
 * encode the number of additional pages to flush beyond the entry's own page
 * (all-ones meaning "a full HV_TLB_FLUSH_UNIT worth of pages").
 */
static inline int fill_gva_list(u64 gva_list[], int offset,
				unsigned long start, unsigned long end)
{
	int gva_n = offset;
	unsigned long cur = start, diff;

	do {
		/* Remaining bytes to cover; 0 once 'cur' has passed 'end'. */
		diff = end > cur ? end - cur : 0;

		gva_list[gva_n] = cur & PAGE_MASK;
		/*
		 * Lower 12 bits encode the number of additional
		 * pages to flush (in addition to the 'cur' page).
		 */
		if (diff >= HV_TLB_FLUSH_UNIT) {
			/* Full unit: encode the maximum extra-page count. */
			gva_list[gva_n] |= ~PAGE_MASK;
			cur += HV_TLB_FLUSH_UNIT;
		} else if (diff) {
			/* Partial tail: encode the exact extra-page count. */
			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
			cur = end;
		}

		gva_n++;

	} while (cur < end);

	return gva_n - offset;
}
/*
 * NOTE(review): interior fragment of a Hyper-V TLB-flush hypercall path.
 * The enclosing function header, the declarations of 'flush', 'flags',
 * 'info', 'cpus', 'cpu', 'vcpu', 'nr_bank', 'max_gvas' and 'do_lazy',
 * and the 'do_native' / 'do_ex_hypercall' labels all lie outside this
 * view. The for_each_cpu() loop body also appears truncated (nothing in
 * the visible code ever sets a bit in 'processor_mask' before it is
 * tested below) — confirm against the full file.
 */
/* No hypercall input page available: fall back to the native flush path. */
if (unlikely(!flush)) {
local_irq_restore(flags); goto do_native;
}
/*
 * The AddressSpace argument must match the CR3 with the PCID bits
 * stripped out.
 */
if (info->mm) {
flush->address_space = virt_to_phys(info->mm->pgd);
flush->address_space &= CR3_ADDR_MASK;
flush->flags = 0;
} else {
/* No mm supplied: request a flush of all virtual address spaces. */
flush->address_space = 0;
flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
}
flush->processor_mask = 0; if (cpumask_equal(cpus, cpu_present_mask)) {
/* Every present CPU is targeted: use the cheap all-processors flag. */
flush->flags |= HV_FLUSH_ALL_PROCESSORS;
} else {
/*
 * From the supplied CPU set we need to figure out if we can get
 * away with cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
 * hypercalls. This is possible when the highest VP number in
 * the set is < 64. As VP numbers are usually in ascending order
 * and match Linux CPU ids, here is an optimization: we check
 * the VP number for the highest bit in the supplied set first
 * so we can quickly find out if using *_EX hypercalls is a
 * must. We will also check all VP numbers when walking the
 * supplied CPU set to remain correct in all cases.
 */
cpu = cpumask_last(cpus);
if (cpu < nr_cpumask_bits && hv_cpu_number_to_vp_number(cpu) >= 64) goto do_ex_hypercall;
for_each_cpu(cpu, cpus) { if (do_lazy && cpu_is_lazy(cpu)) continue;
/* A CPU without a valid VP number cannot be flushed via hypercall. */
vcpu = hv_cpu_number_to_vp_number(cpu); if (vcpu == VP_INVAL) {
local_irq_restore(flags); goto do_native;
}
/* nothing to flush if 'processor_mask' ends up being empty */
if (!flush->processor_mask) {
local_irq_restore(flags); return;
}
}
/*
 * We can flush not more than max_gvas with one hypercall. Flush the
 * whole address space if we were asked to do more.
 */
max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
/*
 * We can flush not more than max_gvas with one hypercall. Flush the
 * whole address space if we were asked to do more.
 *
 * For these hypercalls, Hyper-V treats the valid_bank_mask field
 * of flush->hv_vp_set as part of the fixed size input header.
 * So the variable input header size is equal to nr_bank.
 *
 * NOTE(review): this second max_gvas computation looks like it belongs
 * to the *_EX variant of the flush routine, not the same function as
 * the assignment above — confirm these fragments were not fused
 * together from two different functions.
 */
max_gvas =
(PAGE_SIZE - sizeof(*flush) - nr_bank * sizeof(flush->hv_vp_set.bank_contents[0])) / sizeof(flush->gva_list[0]);
/*
 * NOTE(review): the text below is a German website boilerplate disclaimer
 * that was accidentally pasted into this file (extraction artifact); it is
 * not code and should be removed once confirmed. Kept verbatim, commented
 * out, so the file remains syntactically coherent:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */