/* Set the condition code in the guest program status word */ staticinlinevoid kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsignedlong cc)
{
vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}
/* test availability of facility in a kvm instance */ staticinlineint test_kvm_facility(struct kvm *kvm, unsignedlong nr)
{ return __test_facility(nr, kvm->arch.model.fac_mask) &&
__test_facility(nr, kvm->arch.model.fac_list);
}
/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl ? 1 : 0;
}
/* Enable user space control of cpu states; a one-way switch, idempotent. */
static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm)
{
	if (!kvm->arch.user_cpu_state_ctrl) {
		VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control");
		kvm->arch.user_cpu_state_ctrl = 1;
	}
}
/* get the end gfn of the last (highest gfn) memslot */ staticinlineunsignedlong kvm_s390_get_gfn_end(struct kvm_memslots *slots)
{ struct rb_node *node; struct kvm_memory_slot *ms;
/*
 * NOTE(review): the body of this function appears truncated in this chunk —
 * only the local declarations are visible before the text jumps into the
 * next function's doc comment. Confirm against the complete file before use.
 */
/**
 * __kvm_s390_pv_destroy_page() - Destroy a guest page.
 * @page: the page to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: must be called holding the mm lock for gmap->mm
 *
 * Return: 0 on success, -EFAULT for large folios, otherwise the error from
 *         the export attempt.
 */
static inline int __kvm_s390_pv_destroy_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int rc;

	/* Large folios cannot be secure. Small folio implies FW_LEVEL_PTE. */
	if (folio_test_large(folio))
		return -EFAULT;

	rc = uv_destroy_folio(folio);
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_from_secure_folio(folio);
	/* Missing in the mangled source: propagate the result to the caller. */
	return rc;
}
/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);

/* Move the guest PSW address back by @ilen bytes. */
static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
	struct kvm_s390_sie_block *sie = vcpu->arch.sie_block;

	sie->gpsw.addr = __rewind_psw(sie->gpsw, ilen);
}

/* Advance the guest PSW address by @ilen bytes (rewind by a negative length). */
static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
	kvm_s390_rewind_psw(vcpu, -ilen);
}

/* Rewind the PSW so the intercepted instruction is executed again. */
static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
	/* don't inject PER events if we re-execute the instruction */
	vcpu->arch.sie_block->icptstatus &= ~0x02;
	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}
int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);

/* implemented in gmap-vsie.c */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	if (rc > 0)
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	/* rc <= 0: success or internal error — pass through unchanged. */
	return rc;
}
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, struct kvm_s390_irq *s390irq);
/* implemented in interrupt.c */ int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop); int psw_extint_disabled(struct kvm_vcpu *vcpu); void kvm_s390_destroy_adapters(struct kvm *kvm); int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu); externstruct kvm_device_ops kvm_flic_ops; int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu); void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *buf, int len); int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
__u8 __user *buf, int len); void kvm_s390_gisa_init(struct kvm *kvm); void kvm_s390_gisa_clear(struct kvm *kvm); void kvm_s390_gisa_destroy(struct kvm *kvm); void kvm_s390_gisa_disable(struct kvm *kvm); void kvm_s390_gisa_enable(struct kvm *kvm); int __init kvm_s390_gib_init(u8 nisc); void kvm_s390_gib_destroy(void);
/* implemented in guestdbg.c */ void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu); void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu); int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu); void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu); int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu); int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
/* support for Basic/Extended SCA handling */ staticinlineunion ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{ struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */
return &sca->ipte_control;
} staticinlineint kvm_s390_use_sca_entries(void)
{ /* * Without SIGP interpretation, only SRS interpretation (if available) * might use the entries. By not setting the entries and keeping them * invalid, hardware will not access them but intercept.
*/ return sclp.has_sigpif;
} void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu, struct mcck_volatile_info *mcck_info);
/**
 * kvm_s390_vcpu_crypto_reset_all
 *
 * Reset the crypto attributes for each vcpu. This can be done while the vcpus
 * are running as each vcpu will be removed from SIE before resetting the crypt
 * attributes and restored to SIE afterward.
 *
 * Note: The kvm->lock must be held while calling this function
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);

/**
 * kvm_s390_vcpu_pci_enable_interp
 *
 * Set the associated PCI attributes for each vcpu to allow for zPCI Load/Store
 * interpretation as well as adapter interruption forwarding.
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm);

/**
 * diag9c_forwarding_hz
 *
 * Set the maximum number of diag9c forwarding per second
 */
extern unsigned int diag9c_forwarding_hz;
#endif
Messung V0.5
¤ Dauer der Verarbeitung: 0.2 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.