// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
/*
 * Work out which exception level an exception injected into this vCPU
 * must target.
 *
 * Returns PSR_MODE_EL1h or PSR_MODE_EL2h.
 */
static unsigned int exception_target_el(struct kvm_vcpu *vcpu)
{
	unsigned long mode;

	/* If not nesting, EL1 is the only possible exception target */
	if (likely(!vcpu_has_nv(vcpu)))
		return PSR_MODE_EL1h;

	/*
	 * With NV, we need to pick between EL1 and EL2. Note that we
	 * never deal with a nesting exception here, hence never
	 * changing context, and the exception itself can be delayed
	 * until the next entry.
	 */
	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	if (mode == PSR_MODE_EL2h || mode == PSR_MODE_EL2t)
		return PSR_MODE_EL2h;

	if (mode == PSR_MODE_EL1h || mode == PSR_MODE_EL1t)
		return PSR_MODE_EL1h;

	/* From EL0, HCR_EL2.TGE decides whether EL2 takes the exception. */
	if (mode == PSR_MODE_EL0t)
		return vcpu_el2_tge_is_set(vcpu) ? PSR_MODE_EL2h : PSR_MODE_EL1h;

	/* Any other mode is an internal error. */
	BUG();
}
	/*
	 * NOTE(review): fragment — the enclosing function's header is not
	 * visible in this chunk.
	 *
	 * This delight is brought to you by FEAT_DoubleFault2: when the
	 * effective SCTLR2_ELx.EASE bit is set, pend the abort as an
	 * SError; otherwise pend a synchronous exception. (Presumably
	 * EASE = "External Abort to SError" — confirm against the Arm ARM.)
	 */
	if (effective_sctlr2_ease(vcpu))
		pend_serror_exception(vcpu);
	else
		pend_sync_exception(vcpu);
	/*
	 * NOTE(review): fragment of the AArch64 abort-injection path — the
	 * enclosing function's header (and the declarations of esr, cpsr,
	 * is_aarch32 and is_iabt) is not visible in this chunk.
	 *
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);

	/*
	 * For a data abort, convert the instruction-abort EC set above into
	 * the matching data-abort EC. NOTE(review): this OR appears to rely
	 * on the DABT_* EC encodings being the IABT_* ones with an extra
	 * bit set — confirm against the ESR_ELx_EC_* definitions.
	 */
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
	/*
	 * NOTE(review): fragment of the AArch32 abort-injection path — the
	 * enclosing function's header (and the declarations of fsr, far,
	 * addr and is_pabt) is not visible in this chunk.
	 */

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		/* LPAE format: long-descriptor external-abort fault status. */
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

	if (is_pabt) {
		/* Prefetch abort: pend an AArch32 instruction abort. */
		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
		/*
		 * Update only the upper half of FAR_EL1 — presumably the
		 * AArch32 IFAR maps to FAR_EL1[63:32]; confirm against the
		 * Arm ARM register mappings.
		 */
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !iabt */
		/* Data abort: pend it and update the lower half (DFAR). */
		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
		far &= GENMASK(63, 32);
		far |= addr;
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}
	/*
	 * NOTE(review): fragment — tail of a size-fault injection path; the
	 * enclosing function's header is not visible in this chunk.
	 *
	 * If AArch64 or LPAE, set FSC to 0 to indicate an Address
	 * Size Fault at level 0, as if exceeding PARange.
	 *
	 * Non-LPAE guests will only get the external abort, as there
	 * is no way to describe the ASF.
	 */
	if (vcpu_el1_is_32bit(vcpu) &&
	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
		return;
/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
 *
 * Dispatches to the AArch32 or AArch64 injection helper depending on the
 * execution state of the guest's EL1.
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		inject_undef64(vcpu);
	else
		inject_undef32(vcpu);
}
staticbool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu)
{ if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu)) returntrue;
if (!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA)) returnfalse;
/* * In another example where FEAT_DoubleFault2 is entirely backwards, * "masked" as it relates to the routing effects of HCRX_EL2.TMEA * doesn't consider SCTLR2_EL1.NMEA. That is to say, even if EL1 asked * for non-maskable SErrors, the EL2 bit takes priority if A is set.
*/ if (vcpu_mode_priv(vcpu)) return *vcpu_cpsr(vcpu) & PSR_A_BIT;
/* * Otherwise SErrors are considered unmasked when taken from EL0 and * NMEA is set.
*/ return serror_is_masked(vcpu);
}
	/*
	 * NOTE(review): fragment — the enclosing function's header is not
	 * visible in this chunk; on this path it returns 1 to its caller.
	 *
	 * Emulate the exception entry if SErrors are unmasked. This is
	 * useful if the vCPU is in a nested context with vSErrors enabled:
	 * we have already delegated the hardware vSError context (i.e.
	 * HCR_EL2.VSE, VSESR_EL2, VDISR_EL2) to the guest hypervisor.
	 *
	 * As we're emulating the SError injection we need to explicitly
	 * populate ESR_ELx.EC because hardware will not do it on our behalf.
	 */
	if (!serror_is_masked(vcpu)) {
		pend_serror_exception(vcpu);
		esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR);
		vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
		return 1;
	}
/*
 * NOTE(review): the German text below is website-disclaimer residue from
 * the extraction process, not part of this source file. Preserved here,
 * translated, so the file remains compilable:
 * "The information on this website has been carefully compiled to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */