/*
 * Definitions for features to be allowed or restricted for protected guests.
 *
 * Each field in the masks represents the highest supported value for the
 * feature. If a feature field is not present, it is not supported. Moreover,
 * these are used to generate the guest's view of the feature registers.
 *
 * The approach for protected VMs is to at least support features that are:
 * - Needed by common Linux distributions (e.g., floating point)
 * - Trivial to support, e.g., supporting the feature does not introduce or
 *   require tracking of additional state in KVM
 * - Cannot be trapped or prevent the guest from using anyway
 */
/*
 * None of the features in ID_AA64DFR0_EL1 nor ID_AA64MMFR4_EL1 are supported.
 * However, both have Not-Implemented values that are non-zero. Define them
 * so they can be used when getting the value of these registers.
 */
#define ID_AA64DFR0_EL1_NONZERO_NI					\
(									\
	SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI) |		\
	SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, MTPMU, NI)			\
)
/* * Returns the value of the feature registers based on the system register * value, the vcpu support for the revelant features, and the additional * restrictions for protected VMs.
*/ static u64 get_restricted_features(conststruct kvm_vcpu *vcpu,
u64 sys_reg_val, conststruct pvm_ftr_bits restrictions[])
{
u64 val = 0UL; int i;
if (vm_supported && !vm_supported(vcpu->kvm))
val |= (sign ? min_signed : 0) << shift; elseif (sign && (sys_val >= sign_bit || pvm_max >= sign_bit))
val |= max(sys_val, pvm_max) << shift; else
val |= min(sys_val, pvm_max) << shift;
}
return val;
}
/*
 * Compute the protected VM's sanitized view of the given feature ID register.
 * Unhandled ID registers read as zero (RAZ).
 */
static u64 pvm_calc_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		return get_restricted_features(vcpu, id_aa64pfr0_el1_sys_val, pvmid_aa64pfr0);
	case SYS_ID_AA64PFR1_EL1:
		return get_restricted_features(vcpu, id_aa64pfr1_el1_sys_val, pvmid_aa64pfr1);
	case SYS_ID_AA64ISAR0_EL1:
		/* Passed through unrestricted. */
		return id_aa64isar0_el1_sys_val;
	case SYS_ID_AA64ISAR1_EL1:
		return get_restricted_features(vcpu, id_aa64isar1_el1_sys_val, pvmid_aa64isar1);
	case SYS_ID_AA64ISAR2_EL1:
		return get_restricted_features(vcpu, id_aa64isar2_el1_sys_val, pvmid_aa64isar2);
	case SYS_ID_AA64MMFR0_EL1:
		return get_restricted_features(vcpu, id_aa64mmfr0_el1_sys_val, pvmid_aa64mmfr0);
	case SYS_ID_AA64MMFR1_EL1:
		return get_restricted_features(vcpu, id_aa64mmfr1_el1_sys_val, pvmid_aa64mmfr1);
	case SYS_ID_AA64MMFR2_EL1:
		return get_restricted_features(vcpu, id_aa64mmfr2_el1_sys_val, pvmid_aa64mmfr2);
	case SYS_ID_AA64DFR0_EL1:
		/* Not supported, but has non-zero Not-Implemented fields. */
		return ID_AA64DFR0_EL1_NONZERO_NI;
	case SYS_ID_AA64MMFR4_EL1:
		/* Not supported, but has non-zero Not-Implemented fields. */
		return ID_AA64MMFR4_EL1_NONZERO_NI;
	default:
		/* Unhandled ID register, RAZ */
		return 0;
	}
}
/* * Inject an unknown/undefined exception to an AArch64 guest while most of its * sysregs are live.
*/ staticvoid inject_undef64(struct kvm_vcpu *vcpu)
{
u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
/* * Accessor for AArch32 feature id registers. * * The value of these registers is "unknown" according to the spec if AArch32 * isn't supported.
*/ staticbool pvm_access_id_aarch32(struct kvm_vcpu *vcpu, struct sys_reg_params *p, conststruct sys_reg_desc *r)
{ if (p->is_write) {
inject_undef64(vcpu); returnfalse;
}
return pvm_access_raz_wi(vcpu, p, r);
}
/* * Accessor for AArch64 feature id registers. * * If access is allowed, set the regval to the protected VM's view of the * register and return true. * Otherwise, inject an undefined exception and return false.
*/ staticbool pvm_access_id_aarch64(struct kvm_vcpu *vcpu, struct sys_reg_params *p, conststruct sys_reg_desc *r)
{ if (p->is_write) {
inject_undef64(vcpu); returnfalse;
}
p->regval = read_id_reg(vcpu, r); returntrue;
}
staticbool pvm_gic_read_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p, conststruct sys_reg_desc *r)
{ /* pVMs only support GICv3. 'nuf said. */ if (!p->is_write)
p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE;
returntrue;
}
/* Mark the specified system register as an AArch32 feature id register. */
#define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 }

/* Mark the specified system register as an AArch64 feature id register. */
#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }

/* Mark the specified system register as Read-As-Zero/Write-Ignored */
#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }

/* Mark the specified system register as not being handled in hyp. */
#define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL }
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * NOTE: Anything not explicitly listed here is *restricted by default*, i.e.,
 * it will lead to injecting an exception into the guest.
 *
 * NOTE(review): this table appears truncated by text extraction — the
 * entries after SYS_ID_MMFR3_EL1 and the closing "};" are missing, and
 * "staticconststruct" is a fused-token extraction artifact. Restore this
 * table from the original file rather than hand-editing here.
 */
staticconststruct sys_reg_desc pvm_sys_reg_descs[] = {
	/* Cache maintenance by set/way operations are restricted. */

	/* Debug and Trace Registers are restricted. */

	/* Group 1 ID registers */
	HOST_HANDLED(SYS_REVIDR_EL1),

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	AARCH32(SYS_ID_PFR0_EL1),
	AARCH32(SYS_ID_PFR1_EL1),
	AARCH32(SYS_ID_DFR0_EL1),
	AARCH32(SYS_ID_AFR0_EL1),
	AARCH32(SYS_ID_MMFR0_EL1),
	AARCH32(SYS_ID_MMFR1_EL1),
	AARCH32(SYS_ID_MMFR2_EL1),
	AARCH32(SYS_ID_MMFR3_EL1),
/*
 * Checks that the sysreg table is unique and in-order.
 *
 * Returns 0 if the table is consistent, or 1 otherwise.
 */
int kvm_check_pvm_sysreg_table(void)
{
	unsigned int i;

	/* Each entry must compare strictly greater than its predecessor. */
	for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_descs); i++) {
		if (cmp_sys_reg(&pvm_sys_reg_descs[i - 1],
				&pvm_sys_reg_descs[i]) >= 0)
			return 1;
	}

	return 0;
}
/*
 * Handler for protected VM MSR, MRS or System instruction execution.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't, to be handled by the host.
 */
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const struct sys_reg_desc *r;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/*
	 * NOTE(review): the extracted text used "r" without initializing it;
	 * the decode/lookup lines below were dropped by extraction and have
	 * been reconstructed from the standard pKVM implementation — TODO
	 * verify against the original file. The "&params" arguments had also
	 * been HTML-mangled to "¶ms"; fixed here.
	 */
	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	r = find_reg(&params, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs));

	/* Undefined (RESTRICTED). */
	if (r == NULL) {
		inject_undef64(vcpu);
		return true;
	}

	/* Handled by the host (HOST_HANDLED) */
	if (r->access == NULL)
		return false;

	/* Handled by hyp: skip instruction if instructed to do so. */
	if (r->access(vcpu, &params, r))
		__kvm_skip_instr(vcpu);

	/* Propagate the read result back to the guest register. */
	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return true;
}
/*
 * Handler for protected VM restricted exceptions.
 *
 * Inject an undefined exception into the guest and return true to indicate that
 * the hypervisor has handled the exit, and control should go back to the guest.
 */
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	inject_undef64(vcpu);
	return true;
}