/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2012,2013 - ARM Ltd * Author: Marc Zyngier <marc.zyngier@arm.com> * * Derived from arch/arm/include/kvm_emulate.h * Copyright (C) 2012 - Virtual Open Systems and Columbia University * Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
/*
 * Inject an SError into @vcpu without valid syndrome information.
 *
 * ESR_ELx.ISV (later renamed to IDS) indicates whether or not
 * ESR_ELx.ISS contains IMPLEMENTATION DEFINED syndrome information.
 * Setting that bit when injecting an SError w/o an ESR flags the ISS
 * as not following the architected format.
 */
static inline int kvm_inject_serror(struct kvm_vcpu *vcpu)
{
	return kvm_inject_serror_esr(vcpu, ESR_ELx_ISV);
}
/* Handle a guest WFI: block the vcpu until a wakeup event is pending. */
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

/*
 * Nested virtualization (vEL2) emulation entry points, implemented
 * elsewhere in arch/arm64/kvm. The kvm_inject_nested_* helpers route
 * exceptions to the guest hypervisor with the given syndrome/address.
 */
void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
int kvm_inject_nested_serror(struct kvm_vcpu *vcpu, u64 esr);
/*
 * Reset HCR_EL2 to the default guest configuration.
 *
 * The HCR_GUEST_FLAGS baseline is only written before the vcpu has run
 * for the first time, so a later reset does not clobber bits that were
 * adjusted at runtime.
 */
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_run_once(vcpu))
		vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	/*
	 * For non-FWB CPUs, trap VM ops (HCR_EL2.TVM) until M+C get set
	 * in SCTLR_EL1, so that we can detect when the guest MMU gets
	 * turned on and do the necessary cache maintenance then.
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_TVM;
}
/* * vcpu_get_reg and vcpu_set_reg should always be passed a register number * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on * AArch32 with banked registers.
*/ static __always_inline unsignedlong vcpu_get_reg(conststruct kvm_vcpu *vcpu,
u8 reg_num)
{ return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}
/* * We are in a hypervisor context if the vcpu mode is EL2 or * E2H and TGE bits are set. The latter means we are in the user space * of the VHE kernel. ARMv8.1 ARM describes this as 'InHost' * * Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the * rest of the KVM code, and will result in a misbehaving guest.
*/ return vcpu_is_el2(vcpu) || (e2h && tge) || tge;
}
/* * The layout of SPSR for an AArch32 state is different when observed from an * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32 * view given an AArch64 view. * * In ARM DDI 0487E.a see: * * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280 * * Which show the following differences: * * | Bit | AA64 | AA32 | Notes | * +-----+------+------+-----------------------------| * | 24 | DIT | J | J is RES0 in ARMv8 | * | 21 | SS | DIT | SS doesn't exist in AArch32 | * * ... and all other bits are (currently) common.
*/ staticinlineunsignedlong host_spsr_to_spsr32(unsignedlong spsr)
{ constunsignedlong overlap = BIT(24) | BIT(21); unsignedlong dit = !!(spsr & PSR_AA32_DIT_BIT);
/*
 * Report whether the trapped instruction was 32 bits wide, i.e. whether
 * ESR_ELx.IL is set. This one is not specific to Data Abort.
 */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	return (esr & ESR_ELx_IL) != 0;
}
staticinlinebool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{ if (kvm_vcpu_abt_iss1tw(vcpu)) { /* * Only a permission fault on a S1PTW should be * considered as a write. Otherwise, page tables baked * in a read-only memslot will result in an exception * being delivered in the guest. * * The drawback is that we end-up faulting twice if the * guest is using any of HW AF/DB: a translation fault * to map the page containing the PT (read only at * first), then a permission fault to allow the flags * to be set.
*/ return kvm_vcpu_trap_is_permission_fault(vcpu);
}
/*
 * Queue exception @e on vcpu @v for injection at the next guest entry.
 * Pending an exception and incrementing the PC are mutually exclusive,
 * hence the WARN if INCREMENT_PC is already set on the vcpu.
 */
#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)
/* * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE * format if E2H isn't set.
*/ staticinline u64 vcpu_sanitised_cptr_el2(conststruct kvm_vcpu *vcpu)
{
u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);
if (!vcpu_el2_e2h_is_set(vcpu))
cptr = translate_cptr_el2_to_cpacr_el1(cptr);
return cptr;
}
staticinlinebool ____cptr_xen_trap_enabled(conststruct kvm_vcpu *vcpu, unsignedint xen)
{ switch (xen) { case 0b00: case 0b10: returntrue; case 0b01: return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu); case 0b11: default: returnfalse;
}
}
/*
 * NOTE(review): these statements look like the body of an HCRX_EL2
 * configuration helper whose opening lines were lost from this extract —
 * the enclosing function header is not visible here; confirm against the
 * original file. 'vcpu' and 'kvm' are assumed to be in scope in that
 * missing enclosing function.
 */
	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
		/*
		 * In general, all HCRX_EL2 bits are gated by a feature.
		 * The only reason we can set SMPME without checking any
		 * feature is that its effects are not directly observable
		 * from the guest.
		 */
		vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;

		/* Guest has the MOPS memory copy/set instructions */
		if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
			vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);

		if (kvm_has_tcr2(kvm))
			vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;

		if (kvm_has_fpmr(kvm))
			vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;

		if (kvm_has_sctlr2(kvm))
			vcpu->arch.hcrx_el2 |= HCRX_EL2_SCTLR2En;
	}
} #endif/* __ARM64_KVM_EMULATE_H__ */
Messung V0.5
¤ Dauer der Verarbeitung: 0.14 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden nach bestem Wissen
sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die
Richtigkeit noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.