/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the memory that addressable by the bitmap;
 */
void accept_memory(phys_addr_t start, unsigned long size)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long range_start, range_end;
	struct accept_range range, *entry;
	phys_addr_t end = start + size;
	unsigned long flags;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * load_unaligned_zeropad() can lead to unwanted loads across page
	 * boundaries. The unwanted loads are typically harmless. But, they
	 * might be made to totally unrelated or even unmapped memory.
	 * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
	 * #VE) to recover from these unwanted loads.
	 *
	 * But, this approach does not work for unaccepted memory. For TDX, a
	 * load from unaccepted memory will not lead to a recoverable exception
	 * within the guest. The guest will exit to the VMM where the only
	 * recourse is to terminate the guest.
	 *
	 * There are two parts to fix this issue and comprehensively avoid
	 * access to unaccepted memory. Together these ensure that an extra
	 * "guard" page is accepted in addition to the memory that needs to be
	 * used:
	 *
	 * 1. Implicitly extend the range_contains_unaccepted_memory(start, size)
	 *    checks up to the next unit_size if 'start+size' is aligned on a
	 *    unit_size boundary.
	 *
	 * 2. Implicitly extend accept_memory(start, size) to the next unit_size
	 *    if 'size+end' is aligned on a unit_size boundary. (immediately
	 *    following this comment)
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

	range.start = start / unit_size;
	range.end = DIV_ROUND_UP(end, unit_size);
retry:
	spin_lock_irqsave(&unaccepted_memory_lock, flags);

	/*
	 * Check if anybody works on accepting the same range of the memory.
	 *
	 * The check is done with unit_size granularity. It is crucial to catch
	 * all accept requests to the same unit_size block, even if they don't
	 * overlap on physical address level.
	 */
	list_for_each_entry(entry, &accepting_list, list) {
		if (entry->end <= range.start)
			continue;
		if (entry->start >= range.end)
			continue;

		/*
		 * Somebody else accepting the range. Or at least part of it.
		 *
		 * Drop the lock and retry until it is complete.
		 */
		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
		goto retry;
	}

	/*
	 * Register that the range is about to be accepted.
	 * Make sure nobody else will accept it.
	 */
	list_add(&range.list, &accepting_list);

	range_start = range.start;
	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
				   range.end) {
		unsigned long phys_start, phys_end;
		unsigned long len = range_end - range_start;

		phys_start = range_start * unit_size + unaccepted->phys_base;
		phys_end = range_end * unit_size + unaccepted->phys_base;

		/*
		 * Keep interrupts disabled until the accept operation is
		 * complete in order to prevent deadlocks.
		 *
		 * Enabling interrupts before calling arch_accept_memory()
		 * creates an opportunity for an interrupt handler to request
		 * acceptance for the same memory. The handler will continuously
		 * spin with interrupts disabled, preventing other task from
		 * making progress with the acceptance process.
		 */
		spin_unlock(&unaccepted_memory_lock);

		arch_accept_memory(phys_start, phys_end);

		spin_lock(&unaccepted_memory_lock);
		bitmap_clear(unaccepted->bitmap, range_start, len);
	}

	/* Done accepting: let concurrent accept_memory() callers proceed. */
	list_del(&range.list);

	touch_softlockup_watchdog();

	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
unaccepted = efi_get_unaccepted_table(); if (!unaccepted) returnfalse;
unit_size = unaccepted->unit_size;
/* * Only care for the part of the range that is represented * in the bitmap.
*/ if (start < unaccepted->phys_base)
start = unaccepted->phys_base; if (end < unaccepted->phys_base) returnfalse;
/* Translate to offsets from the beginning of the bitmap */
start -= unaccepted->phys_base;
end -= unaccepted->phys_base;
/* * Also consider the unaccepted state of the *next* page. See fix #1 in * the comment on load_unaligned_zeropad() in accept_memory().
*/ if (!(end % unit_size))
end += unit_size;
/* Make sure not to overrun the bitmap */ if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
end = unaccepted->size * unit_size * BITS_PER_BYTE;
spin_lock_irqsave(&unaccepted_memory_lock, flags); while (start < end) { if (test_bit(start / unit_size, unaccepted->bitmap)) {
ret = true; break;
}
/*
 * NOTE(review): The following German text is not source code — it is a
 * website disclaimer that leaked in when this snippet was copied from a
 * code-display page. Commented out so the file remains valid C.
 * English translation: "The information on this website was carefully
 * compiled to the best of our knowledge. However, neither completeness,
 * nor correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */