/*
 * Fetch the last guest instruction, loading it from guest memory if the
 * fast path in the exit handler could not (vcpu->arch.last_inst is then
 * KVM_INST_FETCH_FAILED).
 *
 * Returns EMULATE_DONE on success and stores the instruction (byteswapped
 * if the guest endianness requires it) in *inst; on failure returns the
 * kvmppc_load_last_inst() result and stores KVM_INST_FETCH_FAILED unswapped.
 */
static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, ppc_inst_t *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret != EMULATE_DONE) {
		*inst = ppc_inst(KVM_INST_FETCH_FAILED);
		return ret;
	}

#ifdef CONFIG_PPC64
	/* Is this a prefixed instruction? */
	if ((vcpu->arch.last_inst >> 32) != 0) {
		u32 prefix = vcpu->arch.last_inst >> 32;
		u32 suffix = vcpu->arch.last_inst;

		if (kvmppc_need_byteswap(vcpu)) {
			prefix = swab32(prefix);
			suffix = swab32(suffix);
		}
		*inst = ppc_inst_prefix(prefix, suffix);
		return EMULATE_DONE;
	}
#endif

	/*
	 * NOTE(review): the non-prefixed tail of this function was missing
	 * from the mangled source; restored from the upstream header — verify.
	 */
	fetched_inst = kvmppc_need_byteswap(vcpu) ?
		swab32(vcpu->arch.last_inst) :
		vcpu->arch.last_inst;
	*inst = ppc_inst(fetched_inst);
	return EMULATE_DONE;
}
/* * Cuts out inst bits with ordering according to spec. * That means the leftmost bit is zero. All given bits are included.
*/ staticinline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
u32 r;
u32 mask;
/* * Replaces inst bits with ordering according to spec.
*/ staticinline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
u32 r;
u32 mask;
/* * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to * a CPU thread that's running/napping inside of a guest is by default regarded * as a request to wake the CPU (if needed) and continue execution within the * guest, potentially to process new state like externally-generated * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI). * * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the * target CPU's PACA. To avoid unnecessary exits to the host, this flag should * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on * the receiving side prior to processing the IPI work. * * NOTE: * * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi(). * This is to guard against sequences such as the following: * * CPU * X: smp_muxed_ipi_set_message(): * X: smp_mb() * X: message[RESCHEDULE] = 1 * X: doorbell_global_ipi(42): * X: kvmppc_set_host_ipi(42) * X: ppc_msgsnd_sync()/smp_mb() * X: ppc_msgsnd() -> 42 * 42: doorbell_exception(): // from CPU X * 42: ppc_msgsync() * 105: smp_muxed_ipi_set_message(): * 105: smb_mb() * // STORE DEFERRED DUE TO RE-ORDERING * --105: message[CALL_FUNCTION] = 1 * | 105: doorbell_global_ipi(42): * | 105: kvmppc_set_host_ipi(42) * | 42: kvmppc_clear_host_ipi(42) * | 42: smp_ipi_demux_relaxed() * | 42: // returns to executing guest * | // RE-ORDERED STORE COMPLETES * ->105: message[CALL_FUNCTION] = 1 * 105: ppc_msgsnd_sync()/smp_mb() * 105: ppc_msgsnd() -> 42 * 42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored * 105: // hangs waiting on 42 to process messages/call_single_queue * * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). 
This is * to guard against sequences such as the following (as well as to create * a read-side pairing with the barrier in kvmppc_set_host_ipi()): * * CPU * X: smp_muxed_ipi_set_message(): * X: smp_mb() * X: message[RESCHEDULE] = 1 * X: doorbell_global_ipi(42): * X: kvmppc_set_host_ipi(42) * X: ppc_msgsnd_sync()/smp_mb() * X: ppc_msgsnd() -> 42 * 42: doorbell_exception(): // from CPU X * 42: ppc_msgsync() * // STORE DEFERRED DUE TO RE-ORDERING * -- 42: kvmppc_clear_host_ipi(42) * | 42: smp_ipi_demux_relaxed() * | 105: smp_muxed_ipi_set_message(): * | 105: smb_mb() * | 105: message[CALL_FUNCTION] = 1 * | 105: doorbell_global_ipi(42): * | 105: kvmppc_set_host_ipi(42) * | // RE-ORDERED STORE COMPLETES * -> 42: kvmppc_clear_host_ipi(42) * 42: // returns to executing guest * 105: ppc_msgsnd_sync()/smp_mb() * 105: ppc_msgsnd() -> 42 * 42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored * 105: // hangs waiting on 42 to process messages/call_single_queue
*/ staticinlinevoid kvmppc_set_host_ipi(int cpu)
{ /* * order stores of IPI messages vs. setting of host_ipi flag * * pairs with the barrier in kvmppc_clear_host_ipi()
*/
smp_mb();
WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 1);
}
static inline void kvmppc_clear_host_ipi(int cpu)
{
	WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 0);
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}
#ifdef CONFIG_KVM_XIVE
/*
 * Below the first "xive" is the "eXternal Interrupt Virtualization Engine"
 * ie. P9 new interrupt controller, while the second "xive" is the legacy
 * "eXternal Interrupt Vector Entry" which is the configuration of an
 * interrupt on the "xics" interrupt controller on P8 and earlier. Those
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
/* * Prototypes for functions called only from assembler code. * Having prototypes reduces sparse errors.
*/ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsignedlong liobn, unsignedlong ioba, unsignedlong tce); long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, unsignedlong liobn, unsignedlong ioba, unsignedlong tce_list, unsignedlong npages); long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, unsignedlong liobn, unsignedlong ioba, unsignedlong tce_value, unsignedlong npages); longint kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target, unsignedint yield_count); long kvmppc_rm_h_random(struct kvm_vcpu *vcpu); void kvmhv_commence_exit(int trap); void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu); void kvmppc_subcore_enter_guest(void); void kvmppc_subcore_exit_guest(void); long kvmppc_realmode_hmi_handler(void); long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu); long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsignedlong flags, long pte_index, unsignedlong pteh, unsignedlong ptel); long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsignedlong flags, unsignedlong pte_index, unsignedlong avpn); long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu); long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsignedlong flags, unsignedlong pte_index, unsignedlong avpn); long kvmppc_h_read(struct kvm_vcpu *vcpu, unsignedlong flags, unsignedlong pte_index); long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsignedlong flags, unsignedlong pte_index); long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsignedlong flags, unsignedlong pte_index); long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsignedlong flags, unsignedlong dest, unsignedlong src); long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsignedlong addr, unsignedlong slb_v, unsignedint status, bool data); void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
/*
 * Host-side operations we want to set up while running in real
 * mode in the guest operating on the xics.
 * Currently only VCPU wakeup is supported.
 */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);
/*
 * Flush the instruction cache for a freshly-mapped guest page so stale
 * icache contents are not executed; tracked via the folio's
 * PG_dcache_clean bit so each folio is only flushed once.
 */
static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct folio *folio;

	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	folio = page_folio(pfn_to_page(pfn));
	if (!test_bit(PG_dcache_clean, &folio->flags)) {
		flush_dcache_icache_folio(folio);
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
/* * Shared struct helpers. The shared struct can be little or big endian, * depending on the guest endianness. So expose helpers to all of them.
*/ staticinlinebool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{ #ifdefined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) /* Only Book3S_64 PR supports bi-endian for now */ return vcpu->arch.shared_big_endian; #elifdefined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__) /* Book3s_64 HV on little endian is always little endian */ returnfalse; #else returntrue; #endif
}
staticinline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{ if (kvmppc_shared_big_endian(vcpu)) return be32_to_cpu(vcpu->arch.shared->sr[nr]); else return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}
/*
 * Write segment register 'nr' in the shared struct, converting from host
 * order to the guest's endianness.
 */
static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}
/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
/*
 * Inverse of kvmppc_fix_ee_before_entry: restore the lazy-EE tracking
 * state to "hard disabled" after returning from the guest, without
 * actually touching the hardware interrupt enable.
 */
static inline void kvmppc_fix_ee_after_exit(void)
{
#ifdef CONFIG_PPC64
	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ALL_DISABLED);
#endif

	trace_hardirqs_off();
}
staticinline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
ulong ea;
ulong msr_64bit = 0;
ea = kvmppc_get_gpr(vcpu, rb); if (ra)
ea += kvmppc_get_gpr(vcpu, ra);
/*
 * NOTE(review): the German text below is a website disclaimer picked up
 * during extraction ("information compiled to the best of our knowledge,
 * no guarantee of completeness/correctness; syntax colouring and the
 * measurement are still experimental"). It is not part of this header;
 * commented out pending removal.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */